2024-11-13 18:37:55,731 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7bd7d6d6
2024-11-13 18:37:55,759 main DEBUG Took 0.025839 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-13 18:37:55,760 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-13 18:37:55,760 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-13 18:37:55,761 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-13 18:37:55,762 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-13 18:37:55,771 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-13 18:37:55,787 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-13 18:37:55,789 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-13 18:37:55,790 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-13 18:37:55,791 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-13 18:37:55,791 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-13 18:37:55,792 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-13 18:37:55,793 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-13 18:37:55,793 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-13 18:37:55,794 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-13 18:37:55,794 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-13 18:37:55,795 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-13 18:37:55,796 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-13 18:37:55,797 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-13 18:37:55,797 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-13 18:37:55,798 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-13 18:37:55,798 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-13 18:37:55,799 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-13 18:37:55,799 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-13 18:37:55,800 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-13 18:37:55,801 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-13 18:37:55,802 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-13 18:37:55,802 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-13 18:37:55,803 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-13 18:37:55,803 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-13 18:37:55,804 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-13 18:37:55,805 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-13 18:37:55,807 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-13 18:37:55,808 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-13 18:37:55,811 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-13 18:37:55,811 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-11-13 18:37:55,813 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-13 18:37:55,813 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-13 18:37:55,825 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-13 18:37:55,829 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-13 18:37:55,831 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-13 18:37:55,832 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-13 18:37:55,832 main DEBUG createAppenders(={Console})
2024-11-13 18:37:55,833 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7bd7d6d6 initialized
2024-11-13 18:37:55,833 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7bd7d6d6
2024-11-13 18:37:55,834 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7bd7d6d6 OK.
2024-11-13 18:37:55,835 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-13 18:37:55,835 main DEBUG OutputStream closed
2024-11-13 18:37:55,835 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-13 18:37:55,836 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-13 18:37:55,836 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@25fb8912 OK
2024-11-13 18:37:55,931 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-13 18:37:55,933 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-13 18:37:55,934 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-13 18:37:55,935 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-13 18:37:55,936 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-13 18:37:55,936 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-13 18:37:55,936 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-13 18:37:55,937 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-13 18:37:55,937 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-13 18:37:55,937 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-13 18:37:55,938 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-13 18:37:55,938 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-13 18:37:55,938 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-13 18:37:55,939 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-13 18:37:55,939 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-13 18:37:55,939 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-13 18:37:55,940 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-13 18:37:55,941 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-13 18:37:55,943 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-13 18:37:55,944 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@64a40280) with optional ClassLoader: null
2024-11-13 18:37:55,944 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-13 18:37:55,945 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@64a40280] started OK.
2024-11-13T18:37:55,967 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.master.balancer.TestBalancerDecision timeout: 13 mins
2024-11-13 18:37:55,971 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-13 18:37:55,972 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-13T18:37:56,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(575): slop=0.2
2024-11-13T18:37:56,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
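The "Loaded config" line above reports the StochasticLoadBalancer tunables that drive the repeated "skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0)" decisions later in this run. As a rough illustration only, the following minimal Java sketch shows how those knobs could be set through an HBase Configuration before starting a cluster or a test; only hbase.master.balancer.stochastic.minCostNeedBalance is named verbatim in this log, while the maxSteps/runMaxSteps/stepsPerRegion/maxRunningTime keys and the chosen values are assumptions inferred from the printed settings, not taken from TestBalancerDecision itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch, not part of the test run above: tune the stochastic balancer knobs
// reported in the "Loaded config" log line. Verify the assumed property keys
// against the HBase version in use before relying on them.
public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Named in the log: lowering this below the logged threshold of 1.0 makes the
    // balancer act on smaller weighted-average imbalances.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    // Assumed keys mirroring the logged values maxSteps=1000000, runMaxSteps=false,
    // stepsPerRegion=800, maxRunningTime=30000.
    conf.setLong("hbase.master.balancer.stochastic.maxSteps", 1_000_000L);
    conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
    conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
    // Print the balancer-related settings so the effective values can be checked.
    conf.forEach(entry -> {
      if (entry.getKey().startsWith("hbase.master.balancer.stochastic")) {
        System.out.println(entry.getKey() + " = " + entry.getValue());
      }
    });
  }
}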
2024-11-13T18:37:56,995 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: master.balancer.TestBalancerDecision#testBalancerDecisions Thread=12, OpenFileDescriptor=286, MaxFileDescriptor=1048576, SystemLoadAverage=1218, ProcessCount=11, AvailableMemoryMB=711 2024-11-13T18:37:57,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-13T18:37:57,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-13T18:37:57,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(575): slop=-1.0 2024-11-13T18:37:57,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=true, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-13T18:37:57,073 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:57,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:37:57,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:37:57,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:37:57,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:37:57,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:37:57,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:57,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-13T18:37:57,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-13T18:37:57,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-13T18:37:57,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-13T18:37:57,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:37:57,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv807342290=1, srv661524517=0} racks are {rack=0} 2024-11-13T18:37:57,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,104 INFO [Time-limited 
test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:57,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv807342290=1, srv661524517=0} racks are {rack=0} 2024-11-13T18:37:57,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:37:57,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1774050212=1, srv105171672=0} racks are {rack=0} 2024-11-13T18:37:57,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:57,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1774050212=1, srv105171672=0} racks are {rack=0} 2024-11-13T18:37:57,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:37:57,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv510848488=0, srv613680901=1} racks are {rack=0} 2024-11-13T18:37:57,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:57,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv510848488=0, srv613680901=1} racks are {rack=0} 2024-11-13T18:37:57,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:37:57,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv650995323=1, srv1763293048=0} racks are {rack=0} 2024-11-13T18:37:57,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:37:57,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv650995323=1, srv1763293048=0} racks are {rack=0} 2024-11-13T18:37:57,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:57,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv650995323=1, srv1763293048=0} racks are {rack=0} 2024-11-13T18:37:57,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,136 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,137 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:37:57,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1371675220=0, srv320653895=1} racks are {rack=0} 2024-11-13T18:37:57,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:37:57,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1371675220=0, srv320653895=1} racks are {rack=0} 2024-11-13T18:37:57,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:37:57,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1371675220=0, srv320653895=1} racks are {rack=0} 2024-11-13T18:37:57,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:57,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1371675220=0, srv320653895=1} racks are {rack=0} 2024-11-13T18:37:57,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,147 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,147 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:57,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv664685761=1, srv1927954728=0} racks are {rack=0} 2024-11-13T18:37:57,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:57,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1488377808=0, srv1661771396=1} racks are {rack=0} 2024-11-13T18:37:57,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:37:57,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1811141433=0, srv1891572002=1} racks are {rack=0} 2024-11-13T18:37:57,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:37:57,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1811141433=0, srv1891572002=1} racks are {rack=0} 2024-11-13T18:37:57,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:37:57,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1811141433=0, srv1891572002=1} racks are {rack=0} 2024-11-13T18:37:57,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:37:57,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1811141433=0, srv1891572002=1} racks are {rack=0} 2024-11-13T18:37:57,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,159 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,160 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,160 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:37:57,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1811141433=0, srv1891572002=1} racks are {rack=0} 2024-11-13T18:37:57,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:57,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1811141433=0, srv1891572002=1} racks are {rack=0} 2024-11-13T18:37:57,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,163 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-13T18:37:57,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1811141433=0, srv1891572002=1} racks are {rack=0} 2024-11-13T18:37:57,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,165 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-13T18:37:57,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1811141433=0, srv1891572002=1} racks are {rack=0} 2024-11-13T18:37:57,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,166 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,167 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-13T18:37:57,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1811141433=0, srv1891572002=1} racks are {rack=0} 2024-11-13T18:37:57,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-13T18:37:57,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1811141433=0, srv1891572002=1} racks are {rack=0} 2024-11-13T18:37:57,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,340 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1380 2024-11-13T18:37:57,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,342 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,343 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,343 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1381 2024-11-13T18:37:57,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,345 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,345 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,345 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table880 2024-11-13T18:37:57,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,347 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table880) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,347 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1140 2024-11-13T18:37:57,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,349 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,349 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,349 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1382 2024-11-13T18:37:57,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table881 2024-11-13T18:37:57,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,353 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,353 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table881) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,353 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1141 2024-11-13T18:37:57,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,355 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,358 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,359 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1383 2024-11-13T18:37:57,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,367 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,367 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,367 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table640 2024-11-13T18:37:57,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table640) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,369 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table882 2024-11-13T18:37:57,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,371 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,371 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table882) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,371 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1142 2024-11-13T18:37:57,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,373 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,373 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,373 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1384 2024-11-13T18:37:57,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,375 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,375 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,375 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table641 2024-11-13T18:37:57,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,377 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,377 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table641) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,378 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table883 2024-11-13T18:37:57,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,379 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,379 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table883) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,379 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1143 2024-11-13T18:37:57,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,381 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,381 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,381 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1385 2024-11-13T18:37:57,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,383 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,383 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table400 2024-11-13T18:37:57,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,385 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,385 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,385 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table642 2024-11-13T18:37:57,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,386 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,387 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table642) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,387 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table884 2024-11-13T18:37:57,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,388 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,388 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table884) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,389 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1144 2024-11-13T18:37:57,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,390 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,390 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,391 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1386 2024-11-13T18:37:57,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,392 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,393 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,393 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table401 2024-11-13T18:37:57,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,395 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,395 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,395 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table643 2024-11-13T18:37:57,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,397 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,397 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table643) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,397 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table885 2024-11-13T18:37:57,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,398 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,399 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table885) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,399 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1145 2024-11-13T18:37:57,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,400 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,400 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,401 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1387 2024-11-13T18:37:57,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,402 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,402 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,402 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table402 2024-11-13T18:37:57,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,404 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,404 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,404 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table644 2024-11-13T18:37:57,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,406 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,406 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table644) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,406 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table886 2024-11-13T18:37:57,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,408 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,408 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table886) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,408 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table403 2024-11-13T18:37:57,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,410 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,410 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,410 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table645 2024-11-13T18:37:57,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,411 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,412 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table645) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,412 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table887 2024-11-13T18:37:57,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,413 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table887) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,413 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table404 2024-11-13T18:37:57,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,415 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,415 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,415 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table646 2024-11-13T18:37:57,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,416 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,417 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table646) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,417 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table888 2024-11-13T18:37:57,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,418 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,418 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table888) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,418 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table405 2024-11-13T18:37:57,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,420 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,420 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table647 2024-11-13T18:37:57,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,421 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,421 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table647) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,421 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table889 2024-11-13T18:37:57,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,423 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table889) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,423 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table406 2024-11-13T18:37:57,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,424 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,425 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,425 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table648 2024-11-13T18:37:57,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,426 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,426 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table648) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,426 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table407 2024-11-13T18:37:57,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,427 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,427 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,428 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table649 2024-11-13T18:37:57,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,430 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,430 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table649) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,431 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table408 2024-11-13T18:37:57,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,432 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,432 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,432 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table409 2024-11-13T18:37:57,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,434 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,434 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1146 2024-11-13T18:37:57,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,435 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,436 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,436 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1388 2024-11-13T18:37:57,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,437 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,437 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,437 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1147 2024-11-13T18:37:57,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,439 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,439 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,439 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1389 2024-11-13T18:37:57,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,440 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,440 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,441 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1148 2024-11-13T18:37:57,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,442 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,443 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,443 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1149 2024-11-13T18:37:57,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,444 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,444 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,444 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1370 2024-11-13T18:37:57,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,445 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,445 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,446 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1371 2024-11-13T18:37:57,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,447 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,447 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,447 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table870 2024-11-13T18:37:57,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,448 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,448 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table870) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,449 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1130 2024-11-13T18:37:57,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,450 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,450 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,450 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1372 2024-11-13T18:37:57,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,451 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,451 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,451 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table871 2024-11-13T18:37:57,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,453 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,453 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table871) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,453 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1131 2024-11-13T18:37:57,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,454 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,454 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,454 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1373 2024-11-13T18:37:57,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,455 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,456 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table630 2024-11-13T18:37:57,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,457 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table630) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,457 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table872 2024-11-13T18:37:57,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,458 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,458 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table872) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,458 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1132 2024-11-13T18:37:57,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,459 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,460 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,460 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1374 2024-11-13T18:37:57,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,461 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,461 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,461 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table631 2024-11-13T18:37:57,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,462 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,462 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table631) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,462 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table873 2024-11-13T18:37:57,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,464 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,464 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table873) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,464 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1133 2024-11-13T18:37:57,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,465 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,465 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,465 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1375 2024-11-13T18:37:57,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,466 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,466 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,467 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table632 2024-11-13T18:37:57,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,468 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,468 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table632) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,468 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table874 2024-11-13T18:37:57,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,469 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,469 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table874) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,469 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1134 2024-11-13T18:37:57,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,470 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,470 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,470 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1376 2024-11-13T18:37:57,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,470 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,470 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,471 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table633 2024-11-13T18:37:57,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,471 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,471 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table633) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,471 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table875 2024-11-13T18:37:57,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,472 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,472 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table875) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,472 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table634 2024-11-13T18:37:57,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,473 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,473 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table634) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,473 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table876 2024-11-13T18:37:57,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,474 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,474 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table876) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,474 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table635 2024-11-13T18:37:57,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,475 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,475 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table635) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,475 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table877 2024-11-13T18:37:57,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,475 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,475 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table877) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,475 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table636 2024-11-13T18:37:57,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,476 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,476 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table636) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,477 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table878 2024-11-13T18:37:57,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,478 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,478 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table878) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,478 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table637 2024-11-13T18:37:57,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,479 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,479 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table637) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,479 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table879 2024-11-13T18:37:57,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,480 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,480 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table879) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,480 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table638 2024-11-13T18:37:57,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,481 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,481 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table638) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,481 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table639 2024-11-13T18:37:57,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,482 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,482 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table639) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,482 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1135 2024-11-13T18:37:57,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,483 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,483 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,484 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1377 2024-11-13T18:37:57,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,485 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,485 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,485 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1136 2024-11-13T18:37:57,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,486 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,486 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,486 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1378 2024-11-13T18:37:57,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,487 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,487 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,487 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1137 2024-11-13T18:37:57,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,488 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,488 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,488 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1379 2024-11-13T18:37:57,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,489 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,489 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,489 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1138 2024-11-13T18:37:57,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,490 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,490 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,490 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1139 2024-11-13T18:37:57,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,491 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,491 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,491 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table660 2024-11-13T18:37:57,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,492 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,492 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table660) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,492 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1360 2024-11-13T18:37:57,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,493 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,493 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,493 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table661 2024-11-13T18:37:57,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,494 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,494 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table661) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,495 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1361 2024-11-13T18:37:57,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,495 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,495 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,496 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table420 2024-11-13T18:37:57,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,497 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,497 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table662 2024-11-13T18:37:57,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,498 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,498 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table662) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,498 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1120 2024-11-13T18:37:57,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,499 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,499 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,499 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1362 2024-11-13T18:37:57,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,500 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,500 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,500 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table421 2024-11-13T18:37:57,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,501 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,501 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,501 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table663 2024-11-13T18:37:57,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,502 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,502 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table663) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,502 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1121 2024-11-13T18:37:57,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,503 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,503 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,503 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1363 2024-11-13T18:37:57,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,504 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,504 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,504 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table422 2024-11-13T18:37:57,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,505 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,505 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,505 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table664 2024-11-13T18:37:57,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,506 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,506 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table664) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,506 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1122 2024-11-13T18:37:57,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,507 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,507 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,507 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1364 2024-11-13T18:37:57,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,508 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,508 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,508 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table423 2024-11-13T18:37:57,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,509 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,509 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,509 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table665 2024-11-13T18:37:57,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,513 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,513 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table665) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,513 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1123 2024-11-13T18:37:57,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,514 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,514 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,514 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1365 2024-11-13T18:37:57,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,515 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,515 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table424 2024-11-13T18:37:57,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,515 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,516 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,516 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table666 2024-11-13T18:37:57,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,516 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,516 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table666) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,516 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table425 2024-11-13T18:37:57,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,517 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,517 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,517 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table667 2024-11-13T18:37:57,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,517 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,518 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table667) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,518 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table426 2024-11-13T18:37:57,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,522 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,522 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,522 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table668 2024-11-13T18:37:57,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,523 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,523 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table668) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,523 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table427 2024-11-13T18:37:57,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,523 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,523 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,523 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table669 2024-11-13T18:37:57,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,524 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,524 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table669) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,524 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table428 2024-11-13T18:37:57,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,525 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,525 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,526 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table429 2024-11-13T18:37:57,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,527 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,527 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,527 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1124 2024-11-13T18:37:57,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,527 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,527 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,527 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1366 2024-11-13T18:37:57,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,528 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,528 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,528 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1125 2024-11-13T18:37:57,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,529 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,529 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,529 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1367 2024-11-13T18:37:57,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,530 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,530 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,530 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1126 2024-11-13T18:37:57,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,531 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,531 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,531 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1368 2024-11-13T18:37:57,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,531 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,532 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,532 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1127 2024-11-13T18:37:57,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,535 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,535 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,535 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1369 2024-11-13T18:37:57,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,536 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,536 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,536 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1128 2024-11-13T18:37:57,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,537 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,537 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,537 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1129 2024-11-13T18:37:57,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,537 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,537 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,537 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table890 2024-11-13T18:37:57,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,538 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,538 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table890) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,538 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table891 2024-11-13T18:37:57,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,538 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,538 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table891) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,538 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table650 2024-11-13T18:37:57,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,539 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,539 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table650) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,539 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table892 2024-11-13T18:37:57,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,539 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,539 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table892) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,539 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1350 2024-11-13T18:37:57,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,540 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,540 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,540 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table651 2024-11-13T18:37:57,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,541 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,541 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table651) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,541 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table893 2024-11-13T18:37:57,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,541 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,541 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table893) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,541 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1351 2024-11-13T18:37:57,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,542 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,542 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,542 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table410 2024-11-13T18:37:57,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,543 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,543 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,543 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table652 2024-11-13T18:37:57,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,543 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,544 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table652) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,544 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table894 2024-11-13T18:37:57,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,544 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,544 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table894) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,544 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1110 2024-11-13T18:37:57,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,545 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,545 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,545 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1352 2024-11-13T18:37:57,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,546 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,546 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,546 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table411 2024-11-13T18:37:57,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,549 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,549 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,549 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table653 2024-11-13T18:37:57,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,549 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,549 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table653) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,550 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table895 2024-11-13T18:37:57,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,550 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,550 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table895) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,550 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1111 2024-11-13T18:37:57,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,551 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,551 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,551 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1353 2024-11-13T18:37:57,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,552 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,552 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,552 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table412 2024-11-13T18:37:57,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,553 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,553 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,553 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table654 2024-11-13T18:37:57,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,554 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,554 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table654) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,554 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table896 2024-11-13T18:37:57,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,554 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,554 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table896) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,555 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1112 2024-11-13T18:37:57,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,555 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,555 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,555 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1354 2024-11-13T18:37:57,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,555 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,555 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,555 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table413 2024-11-13T18:37:57,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,556 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,556 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,556 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table655 2024-11-13T18:37:57,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,558 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,558 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table655) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,558 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table897 2024-11-13T18:37:57,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,559 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,559 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table897) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,559 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table414 2024-11-13T18:37:57,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,563 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,564 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,564 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table656 2024-11-13T18:37:57,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,565 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,565 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table656) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,565 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table898 2024-11-13T18:37:57,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,571 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,571 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table898) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,571 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table415 2024-11-13T18:37:57,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,572 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,573 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,573 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table657 2024-11-13T18:37:57,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,573 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,573 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table657) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,573 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table899 2024-11-13T18:37:57,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,574 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,574 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table899) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,574 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table416 2024-11-13T18:37:57,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,575 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,575 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,575 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table658 2024-11-13T18:37:57,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,580 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,580 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table658) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,580 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table417 2024-11-13T18:37:57,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,581 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,581 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,581 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table659 2024-11-13T18:37:57,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,582 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,582 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table659) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,582 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table418 2024-11-13T18:37:57,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,582 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,582 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,583 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table419 2024-11-13T18:37:57,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,583 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,583 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,583 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1113 2024-11-13T18:37:57,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,588 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,588 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,589 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1355 2024-11-13T18:37:57,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,589 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,589 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,589 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1114 2024-11-13T18:37:57,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,590 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,590 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,590 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1356 2024-11-13T18:37:57,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,591 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,591 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,591 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1115 2024-11-13T18:37:57,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,591 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,591 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,591 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1357 2024-11-13T18:37:57,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,595 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,595 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,595 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1116 2024-11-13T18:37:57,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,598 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,598 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,598 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1358 2024-11-13T18:37:57,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,599 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,599 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,599 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1117 2024-11-13T18:37:57,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,599 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,599 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,600 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1359 2024-11-13T18:37:57,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,600 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,600 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,600 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1118 2024-11-13T18:37:57,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,601 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,601 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,601 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1119 2024-11-13T18:37:57,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,603 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,603 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,603 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1182 2024-11-13T18:37:57,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,603 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,603 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,603 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1183 2024-11-13T18:37:57,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,604 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,604 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,604 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1184 2024-11-13T18:37:57,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,605 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,605 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,605 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1185 2024-11-13T18:37:57,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,606 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,606 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,606 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1186 2024-11-13T18:37:57,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,606 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,606 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,607 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1187 2024-11-13T18:37:57,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,607 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,607 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,607 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table840 2024-11-13T18:37:57,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,608 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,608 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table840) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,608 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1188 2024-11-13T18:37:57,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,609 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,609 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,609 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table841 2024-11-13T18:37:57,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,609 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,610 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table841) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,610 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1189 2024-11-13T18:37:57,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,610 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,610 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,610 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table600 2024-11-13T18:37:57,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,612 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,612 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table600) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,612 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table842 2024-11-13T18:37:57,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,613 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,613 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table842) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,613 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table601 2024-11-13T18:37:57,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,614 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,614 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table601) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,615 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table843 2024-11-13T18:37:57,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,615 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,615 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table843) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,615 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table602 2024-11-13T18:37:57,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,616 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,616 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table602) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,616 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table844 2024-11-13T18:37:57,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,617 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,617 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table844) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,617 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table603 2024-11-13T18:37:57,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,619 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,619 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table603) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,619 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table845 2024-11-13T18:37:57,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,619 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,619 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table845) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,619 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table604 2024-11-13T18:37:57,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,624 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,624 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table604) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,624 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table846 2024-11-13T18:37:57,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,625 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,625 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table846) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,625 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table605 2024-11-13T18:37:57,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,625 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,625 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table605) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,626 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table847 2024-11-13T18:37:57,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,626 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,626 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table847) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,626 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table606 2024-11-13T18:37:57,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,627 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,627 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table606) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,627 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table848 2024-11-13T18:37:57,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,627 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,627 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table848) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,627 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1180 2024-11-13T18:37:57,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,628 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,628 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,628 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table607 2024-11-13T18:37:57,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table607) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,629 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table849 2024-11-13T18:37:57,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table849) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,630 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1181 2024-11-13T18:37:57,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,630 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table608 2024-11-13T18:37:57,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,631 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,631 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table608) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,631 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table609 2024-11-13T18:37:57,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,632 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,632 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table609) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,632 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1171 2024-11-13T18:37:57,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,633 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1172 2024-11-13T18:37:57,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,633 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1173 2024-11-13T18:37:57,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,634 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,634 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,634 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1174 2024-11-13T18:37:57,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,635 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,635 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,635 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1175 2024-11-13T18:37:57,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,635 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,635 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,636 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1176 2024-11-13T18:37:57,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,636 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,636 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,636 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1177 2024-11-13T18:37:57,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,637 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,637 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,637 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table830 2024-11-13T18:37:57,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,638 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,638 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table830) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,638 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1178 2024-11-13T18:37:57,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,639 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,639 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,639 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table831 2024-11-13T18:37:57,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,639 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,639 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table831) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,639 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table832 2024-11-13T18:37:57,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,640 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,640 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table832) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,640 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table833 2024-11-13T18:37:57,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,641 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,641 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table833) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,641 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table834 2024-11-13T18:37:57,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,641 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table834) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table835 2024-11-13T18:37:57,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,642 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table835) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table836 2024-11-13T18:37:57,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,643 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table836) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,643 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table837 2024-11-13T18:37:57,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,644 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table837) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table838 2024-11-13T18:37:57,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,644 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,644 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table838) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1170 2024-11-13T18:37:57,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,645 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table839 2024-11-13T18:37:57,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table839) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,646 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1179 2024-11-13T18:37:57,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,646 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,647 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,647 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1160 2024-11-13T18:37:57,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,647 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,648 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,648 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1161 2024-11-13T18:37:57,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,648 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,648 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,648 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1162 2024-11-13T18:37:57,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,649 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1163 2024-11-13T18:37:57,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,650 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table860 2024-11-13T18:37:57,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,651 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table860) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1164 2024-11-13T18:37:57,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,651 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table861 2024-11-13T18:37:57,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,652 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,652 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table861) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,652 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1165 2024-11-13T18:37:57,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,653 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,653 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,653 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table620 2024-11-13T18:37:57,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,654 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,654 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table620) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,654 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table862 2024-11-13T18:37:57,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,654 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,654 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table862) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,654 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1166 2024-11-13T18:37:57,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,655 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table621 2024-11-13T18:37:57,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,656 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,656 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table621) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,656 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table863 2024-11-13T18:37:57,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,657 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,657 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table863) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,657 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1167 2024-11-13T18:37:57,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,657 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,658 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,658 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table622 2024-11-13T18:37:57,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,658 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,658 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table622) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,658 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table864 2024-11-13T18:37:57,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,659 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,659 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table864) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,659 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table623 2024-11-13T18:37:57,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,660 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,660 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table623) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,660 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table865 2024-11-13T18:37:57,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table865) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table624 2024-11-13T18:37:57,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,662 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table624) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,662 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table866 2024-11-13T18:37:57,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,663 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table866) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,663 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table625 2024-11-13T18:37:57,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,664 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,664 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table625) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,664 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table867 2024-11-13T18:37:57,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table867) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,665 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table626 2024-11-13T18:37:57,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,666 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table626) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,666 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table868 2024-11-13T18:37:57,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,666 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,666 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table868) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,666 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table627 2024-11-13T18:37:57,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,667 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,667 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table627) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,667 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table869 2024-11-13T18:37:57,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,668 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,668 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table869) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,668 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table628 2024-11-13T18:37:57,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,668 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,668 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table628) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,669 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table629 2024-11-13T18:37:57,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,669 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,669 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table629) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,669 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1168 2024-11-13T18:37:57,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,670 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,670 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,670 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1169 2024-11-13T18:37:57,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,671 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,671 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1391 2024-11-13T18:37:57,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,671 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,671 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1150 2024-11-13T18:37:57,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,672 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,672 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,672 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1392 2024-11-13T18:37:57,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,673 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,673 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,673 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1151 2024-11-13T18:37:57,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,673 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,673 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,673 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1393 2024-11-13T18:37:57,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,674 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,674 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1152 2024-11-13T18:37:57,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,675 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,675 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1394 2024-11-13T18:37:57,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,684 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,684 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,684 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1153 2024-11-13T18:37:57,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,685 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,685 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,685 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1395 2024-11-13T18:37:57,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,686 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table850 2024-11-13T18:37:57,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,687 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table850) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1154 2024-11-13T18:37:57,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,687 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,687 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1396 2024-11-13T18:37:57,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,692 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,692 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table851 2024-11-13T18:37:57,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,692 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,693 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table851) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1155 2024-11-13T18:37:57,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,693 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,693 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1397 2024-11-13T18:37:57,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,694 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,694 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,694 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table610 2024-11-13T18:37:57,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,695 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table610) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table852 2024-11-13T18:37:57,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,695 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table852) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1156 2024-11-13T18:37:57,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,696 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,696 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,696 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1398 2024-11-13T18:37:57,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table611 2024-11-13T18:37:57,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table611) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table853 2024-11-13T18:37:57,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table853) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,698 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table612 2024-11-13T18:37:57,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table612) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table854 2024-11-13T18:37:57,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,699 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table854) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table613 2024-11-13T18:37:57,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,700 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,700 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table613) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,700 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table855 2024-11-13T18:37:57,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,701 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,701 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table855) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,701 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table614 2024-11-13T18:37:57,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,701 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,701 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table614) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,702 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table856 2024-11-13T18:37:57,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,702 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,702 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table856) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,702 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table615 2024-11-13T18:37:57,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,703 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,703 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table615) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,703 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table857 2024-11-13T18:37:57,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,703 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,703 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table857) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,704 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table616 2024-11-13T18:37:57,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,704 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,704 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table616) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,704 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table858 2024-11-13T18:37:57,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,705 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,705 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table858) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,705 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table617 2024-11-13T18:37:57,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,706 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,706 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table617) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,706 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table859 2024-11-13T18:37:57,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,707 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table859) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table618 2024-11-13T18:37:57,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,708 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,708 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table618) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,708 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1390 2024-11-13T18:37:57,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,708 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,708 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,708 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table619 2024-11-13T18:37:57,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,709 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,709 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table619) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,709 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1157 2024-11-13T18:37:57,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,710 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,710 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1399 2024-11-13T18:37:57,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,710 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1158 2024-11-13T18:37:57,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,711 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,711 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1159 2024-11-13T18:37:57,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table240 2024-11-13T18:37:57,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table482 2024-11-13T18:37:57,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,713 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,713 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table482) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,713 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table241 2024-11-13T18:37:57,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,714 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table483 2024-11-13T18:37:57,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,715 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,715 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table483) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table242 2024-11-13T18:37:57,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,716 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,716 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,716 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table484 2024-11-13T18:37:57,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table484) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table243 2024-11-13T18:37:57,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table485 2024-11-13T18:37:57,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table485) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table244 2024-11-13T18:37:57,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,719 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table486 2024-11-13T18:37:57,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,720 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,720 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table486) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,720 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table245 2024-11-13T18:37:57,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,721 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,721 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,721 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table487 2024-11-13T18:37:57,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,722 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table487) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,722 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table246 2024-11-13T18:37:57,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,723 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table488 2024-11-13T18:37:57,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,724 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table488) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,724 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table247 2024-11-13T18:37:57,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,725 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table489 2024-11-13T18:37:57,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,725 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table489) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table248 2024-11-13T18:37:57,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table249 2024-11-13T18:37:57,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1308 2024-11-13T18:37:57,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1309 2024-11-13T18:37:57,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,729 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,729 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1300 2024-11-13T18:37:57,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,730 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,730 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,730 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1301 2024-11-13T18:37:57,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,731 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,731 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,731 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1302 2024-11-13T18:37:57,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,731 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,731 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,731 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1303 2024-11-13T18:37:57,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,732 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,732 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,732 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1304 2024-11-13T18:37:57,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,733 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,733 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,733 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table490 2024-11-13T18:37:57,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,733 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,734 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table490) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,734 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1305 2024-11-13T18:37:57,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,734 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,734 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,734 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table491 2024-11-13T18:37:57,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,735 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table491) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,735 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1306 2024-11-13T18:37:57,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,735 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table250 2024-11-13T18:37:57,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,736 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table492 2024-11-13T18:37:57,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,737 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,737 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table492) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,737 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1307 2024-11-13T18:37:57,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,737 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,738 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,738 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table471 2024-11-13T18:37:57,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,738 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,738 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table471) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,738 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table230 2024-11-13T18:37:57,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,739 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,739 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,739 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table472 2024-11-13T18:37:57,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table472) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,740 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table231 2024-11-13T18:37:57,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,740 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table473 2024-11-13T18:37:57,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,741 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,741 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table473) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table232 2024-11-13T18:37:57,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,742 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,742 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table474 2024-11-13T18:37:57,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,743 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table474) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table233 2024-11-13T18:37:57,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,744 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,744 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,744 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table475 2024-11-13T18:37:57,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table475) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table234 2024-11-13T18:37:57,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table476 2024-11-13T18:37:57,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,746 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,746 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table476) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,746 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table235 2024-11-13T18:37:57,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table477 2024-11-13T18:37:57,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table477) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table236 2024-11-13T18:37:57,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table478 2024-11-13T18:37:57,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,749 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,749 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table478) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,749 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table237 2024-11-13T18:37:57,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table479 2024-11-13T18:37:57,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table479) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table238 2024-11-13T18:37:57,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,752 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table239 2024-11-13T18:37:57,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,753 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,753 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,753 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table480 2024-11-13T18:37:57,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,753 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,754 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table480) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,754 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table481 2024-11-13T18:37:57,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,754 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,754 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table481) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,754 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table262 2024-11-13T18:37:57,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,760 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,760 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,760 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table263 2024-11-13T18:37:57,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table264 2024-11-13T18:37:57,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table265 2024-11-13T18:37:57,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table266 2024-11-13T18:37:57,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table267 2024-11-13T18:37:57,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,764 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,764 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table268 2024-11-13T18:37:57,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,765 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table269 2024-11-13T18:37:57,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,766 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,766 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table270 2024-11-13T18:37:57,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table271 2024-11-13T18:37:57,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table272 2024-11-13T18:37:57,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,777 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,778 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table251 2024-11-13T18:37:57,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,778 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table493 2024-11-13T18:37:57,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,779 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,779 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table493) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table252 2024-11-13T18:37:57,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table494 2024-11-13T18:37:57,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table494) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table253 2024-11-13T18:37:57,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table495 2024-11-13T18:37:57,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table495) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,782 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table254 2024-11-13T18:37:57,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table496 2024-11-13T18:37:57,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,783 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table496) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table255 2024-11-13T18:37:57,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table497 2024-11-13T18:37:57,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table497) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,794 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table256 2024-11-13T18:37:57,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,794 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,794 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table498 2024-11-13T18:37:57,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,795 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table498) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table257 2024-11-13T18:37:57,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table499 2024-11-13T18:37:57,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,805 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,805 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table499) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,805 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table258 2024-11-13T18:37:57,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table259 2024-11-13T18:37:57,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table260 2024-11-13T18:37:57,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,808 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table261 2024-11-13T18:37:57,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,808 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,808 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table680 2024-11-13T18:37:57,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,809 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table680) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table681 2024-11-13T18:37:57,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,809 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table681) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table440 2024-11-13T18:37:57,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table440) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,821 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table682 2024-11-13T18:37:57,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table682) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table441 2024-11-13T18:37:57,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table441) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,823 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table683 2024-11-13T18:37:57,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table683) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,823 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table200 2024-11-13T18:37:57,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,828 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table442 2024-11-13T18:37:57,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table442) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table684 2024-11-13T18:37:57,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table684) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1340 2024-11-13T18:37:57,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table201 2024-11-13T18:37:57,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table443 2024-11-13T18:37:57,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,836 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,837 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table443) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,837 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table685 2024-11-13T18:37:57,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,837 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,837 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table685) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,837 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1341 2024-11-13T18:37:57,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,838 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,838 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,838 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table202 2024-11-13T18:37:57,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,842 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,842 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table444 2024-11-13T18:37:57,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table444) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table686 2024-11-13T18:37:57,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table686) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1100 2024-11-13T18:37:57,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1342 2024-11-13T18:37:57,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table203 2024-11-13T18:37:57,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table445 2024-11-13T18:37:57,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table445) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table687 2024-11-13T18:37:57,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table687) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1101 2024-11-13T18:37:57,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,853 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,853 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1343 2024-11-13T18:37:57,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,853 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,853 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table204 2024-11-13T18:37:57,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,854 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table446 2024-11-13T18:37:57,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,855 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table446) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table688 2024-11-13T18:37:57,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table688) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table205 2024-11-13T18:37:57,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table447 2024-11-13T18:37:57,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table447) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table689 2024-11-13T18:37:57,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table689) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table206 2024-11-13T18:37:57,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,864 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table448 2024-11-13T18:37:57,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,865 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table448) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table207 2024-11-13T18:37:57,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table449 2024-11-13T18:37:57,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table449) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table208 2024-11-13T18:37:57,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table209 2024-11-13T18:37:57,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1102 2024-11-13T18:37:57,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1344 2024-11-13T18:37:57,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1103 2024-11-13T18:37:57,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1345 2024-11-13T18:37:57,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1104 2024-11-13T18:37:57,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1346 2024-11-13T18:37:57,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1105 2024-11-13T18:37:57,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,872 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1347 2024-11-13T18:37:57,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,872 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1106 2024-11-13T18:37:57,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1348 2024-11-13T18:37:57,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1107 2024-11-13T18:37:57,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1349 2024-11-13T18:37:57,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1108 2024-11-13T18:37:57,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table690 2024-11-13T18:37:57,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table690) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1109 2024-11-13T18:37:57,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table670 2024-11-13T18:37:57,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table670) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table671 2024-11-13T18:37:57,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table671) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table430 2024-11-13T18:37:57,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table672 2024-11-13T18:37:57,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table672) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table431 2024-11-13T18:37:57,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table673 2024-11-13T18:37:57,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table673) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table432 2024-11-13T18:37:57,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table432) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table674 2024-11-13T18:37:57,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table674) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1330 2024-11-13T18:37:57,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table433 2024-11-13T18:37:57,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table433) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table675 2024-11-13T18:37:57,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table675) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1331 2024-11-13T18:37:57,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table434 2024-11-13T18:37:57,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table434) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table676 2024-11-13T18:37:57,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table676) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1332 2024-11-13T18:37:57,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,892 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,892 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,892 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table435 2024-11-13T18:37:57,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,892 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,892 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table435) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,892 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table677 2024-11-13T18:37:57,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,893 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,893 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table677) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,893 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table436 2024-11-13T18:37:57,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table436) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table678 2024-11-13T18:37:57,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table678) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table437 2024-11-13T18:37:57,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table437) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table679 2024-11-13T18:37:57,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table679) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table438 2024-11-13T18:37:57,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table438) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table439 2024-11-13T18:37:57,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table439) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1333 2024-11-13T18:37:57,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1334 2024-11-13T18:37:57,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1335 2024-11-13T18:37:57,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1336 2024-11-13T18:37:57,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1337 2024-11-13T18:37:57,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1338 2024-11-13T18:37:57,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1339 2024-11-13T18:37:57,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table460 2024-11-13T18:37:57,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,902 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,902 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table460) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,902 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table461 2024-11-13T18:37:57,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,904 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,904 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table461) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,904 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table220 2024-11-13T18:37:57,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,914 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,914 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,914 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table462 2024-11-13T18:37:57,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,921 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,921 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table462) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,921 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table221 2024-11-13T18:37:57,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table463 2024-11-13T18:37:57,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table463) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table222 2024-11-13T18:37:57,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table464 2024-11-13T18:37:57,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,926 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,926 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table464) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,926 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table223 2024-11-13T18:37:57,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,927 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table465 2024-11-13T18:37:57,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table465) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,927 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table224 2024-11-13T18:37:57,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table466 2024-11-13T18:37:57,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,935 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table466) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,935 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1320 2024-11-13T18:37:57,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table225 2024-11-13T18:37:57,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table467 2024-11-13T18:37:57,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table467) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1321 2024-11-13T18:37:57,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table226 2024-11-13T18:37:57,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table468 2024-11-13T18:37:57,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table468) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table227 2024-11-13T18:37:57,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table469 2024-11-13T18:37:57,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,941 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table469) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table228 2024-11-13T18:37:57,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,942 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,942 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table229 2024-11-13T18:37:57,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1322 2024-11-13T18:37:57,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,944 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1323 2024-11-13T18:37:57,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1324 2024-11-13T18:37:57,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1325 2024-11-13T18:37:57,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1326 2024-11-13T18:37:57,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1327 2024-11-13T18:37:57,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,953 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1328 2024-11-13T18:37:57,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,954 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table470 2024-11-13T18:37:57,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,954 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table470) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,954 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1329 2024-11-13T18:37:57,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table691 2024-11-13T18:37:57,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table691) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table450 2024-11-13T18:37:57,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,956 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table450) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table692 2024-11-13T18:37:57,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,956 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table692) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table451 2024-11-13T18:37:57,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table451) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table693 2024-11-13T18:37:57,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table693) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table210 2024-11-13T18:37:57,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table452 2024-11-13T18:37:57,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table452) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table694 2024-11-13T18:37:57,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table694) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table211 2024-11-13T18:37:57,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,961 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table453 2024-11-13T18:37:57,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,961 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,961 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table453) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table695 2024-11-13T18:37:57,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,961 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table695) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table212 2024-11-13T18:37:57,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table454 2024-11-13T18:37:57,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table454) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,963 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table696 2024-11-13T18:37:57,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table696) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table213 2024-11-13T18:37:57,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,965 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,965 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table455 2024-11-13T18:37:57,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table455) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table697 2024-11-13T18:37:57,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table697) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table214 2024-11-13T18:37:57,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table456 2024-11-13T18:37:57,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table456) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table698 2024-11-13T18:37:57,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table698) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1310 2024-11-13T18:37:57,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table215 2024-11-13T18:37:57,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,969 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table457 2024-11-13T18:37:57,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table457) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,970 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table699 2024-11-13T18:37:57,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table699) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,970 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table216 2024-11-13T18:37:57,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table458 2024-11-13T18:37:57,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table458) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table217 2024-11-13T18:37:57,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,972 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,972 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table459 2024-11-13T18:37:57,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table459) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table218 2024-11-13T18:37:57,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table219 2024-11-13T18:37:57,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,974 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,974 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,974 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1319 2024-11-13T18:37:57,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,975 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1311 2024-11-13T18:37:57,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,976 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1312 2024-11-13T18:37:57,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,976 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1313 2024-11-13T18:37:57,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1314 2024-11-13T18:37:57,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1315 2024-11-13T18:37:57,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,978 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,978 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1316 2024-11-13T18:37:57,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1317 2024-11-13T18:37:57,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1318 2024-11-13T18:37:57,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table196 2024-11-13T18:37:57,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table197 2024-11-13T18:37:57,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table198 2024-11-13T18:37:57,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table199 2024-11-13T18:37:57,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table163 2024-11-13T18:37:57,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,986 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,986 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table164 2024-11-13T18:37:57,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,986 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table165 2024-11-13T18:37:57,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table166 2024-11-13T18:37:57,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table167 2024-11-13T18:37:57,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table168 2024-11-13T18:37:57,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,989 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,989 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,989 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table169 2024-11-13T18:37:57,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,991 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,991 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table170 2024-11-13T18:37:57,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,992 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,992 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,992 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table171 2024-11-13T18:37:57,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table172 2024-11-13T18:37:57,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table173 2024-11-13T18:37:57,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,994 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,994 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table152 2024-11-13T18:37:57,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,994 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table394 2024-11-13T18:37:57,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table153 2024-11-13T18:37:57,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,996 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,996 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,996 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table395 2024-11-13T18:37:57,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,997 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,997 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,998 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table154 2024-11-13T18:37:57,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,998 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,998 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,998 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table396 2024-11-13T18:37:57,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:57,999 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:57,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:57,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table155 2024-11-13T18:37:57,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:57,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:57,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:57,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:57,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:57,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table397 2024-11-13T18:37:58,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table156 2024-11-13T18:37:58,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table398 2024-11-13T18:37:58,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table157 2024-11-13T18:37:58,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table399 2024-11-13T18:37:58,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table158 2024-11-13T18:37:58,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table159 2024-11-13T18:37:58,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table160 2024-11-13T18:37:58,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table161 2024-11-13T18:37:58,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table162 2024-11-13T18:37:58,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table185 2024-11-13T18:37:58,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table186 2024-11-13T18:37:58,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table187 2024-11-13T18:37:58,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table188 2024-11-13T18:37:58,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,008 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,008 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table189 2024-11-13T18:37:58,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,008 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,008 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table190 2024-11-13T18:37:58,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,009 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,009 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,009 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table191 2024-11-13T18:37:58,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,009 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,009 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,009 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table192 2024-11-13T18:37:58,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,010 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,010 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table193 2024-11-13T18:37:58,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,010 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,010 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table194 2024-11-13T18:37:58,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table195 2024-11-13T18:37:58,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table174 2024-11-13T18:37:58,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,012 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,012 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,012 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table175 2024-11-13T18:37:58,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,013 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,013 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,013 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table176 2024-11-13T18:37:58,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,013 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,013 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,013 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table177 2024-11-13T18:37:58,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,014 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,014 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,014 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table178 2024-11-13T18:37:58,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,015 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,015 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,015 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table179 2024-11-13T18:37:58,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,015 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,015 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,015 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table180 2024-11-13T18:37:58,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table181 2024-11-13T18:37:58,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table182 2024-11-13T18:37:58,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table183 2024-11-13T18:37:58,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table184 2024-11-13T18:37:58,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,018 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,018 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,018 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table800 2024-11-13T18:37:58,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,018 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,018 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table800) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,018 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table801 2024-11-13T18:37:58,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,019 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,019 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table801) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,019 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table802 2024-11-13T18:37:58,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,019 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,019 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table802) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,019 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table803 2024-11-13T18:37:58,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table803) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table804 2024-11-13T18:37:58,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table804) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table805 2024-11-13T18:37:58,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,021 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,021 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table805) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,021 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table806 2024-11-13T18:37:58,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,021 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,021 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table806) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,021 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table807 2024-11-13T18:37:58,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table807) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table808 2024-11-13T18:37:58,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table808) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table809 2024-11-13T18:37:58,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table809) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table820 2024-11-13T18:37:58,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table820) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table821 2024-11-13T18:37:58,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table821) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table822 2024-11-13T18:37:58,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table822) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table823 2024-11-13T18:37:58,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,025 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,025 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table823) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,026 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table824 2024-11-13T18:37:58,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,026 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,026 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table824) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,026 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table825 2024-11-13T18:37:58,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table825) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table826 2024-11-13T18:37:58,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table826) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table827 2024-11-13T18:37:58,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,028 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,028 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table827) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,028 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table828 2024-11-13T18:37:58,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table828) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table829 2024-11-13T18:37:58,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table829) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1193 2024-11-13T18:37:58,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,030 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1194 2024-11-13T18:37:58,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,030 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,030 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1195 2024-11-13T18:37:58,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,030 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,030 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1196 2024-11-13T18:37:58,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1197 2024-11-13T18:37:58,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1198 2024-11-13T18:37:58,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,032 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1199 2024-11-13T18:37:58,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,033 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table810 2024-11-13T18:37:58,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,033 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table810) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table811 2024-11-13T18:37:58,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,034 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table811) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table812 2024-11-13T18:37:58,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,034 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table812) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table813 2024-11-13T18:37:58,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table813) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table814 2024-11-13T18:37:58,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table814) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1190 2024-11-13T18:37:58,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,042 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table815 2024-11-13T18:37:58,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table815) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,042 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1191 2024-11-13T18:37:58,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,043 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table816 2024-11-13T18:37:58,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,043 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table816) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,043 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1192 2024-11-13T18:37:58,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,043 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,043 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table817 2024-11-13T18:37:58,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table817) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,044 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table818 2024-11-13T18:37:58,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,045 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,045 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table818) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,045 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table819 2024-11-13T18:37:58,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,045 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,045 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table819) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,045 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1260 2024-11-13T18:37:58,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,046 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,046 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,046 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1261 2024-11-13T18:37:58,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,046 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,046 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,046 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table760 2024-11-13T18:37:58,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,046 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,047 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table760) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,047 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1020 2024-11-13T18:37:58,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,047 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,047 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1020) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,047 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1262 2024-11-13T18:37:58,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,047 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,047 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,048 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table761 2024-11-13T18:37:58,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,048 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,048 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table761) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,048 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1021 2024-11-13T18:37:58,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1021) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1263 2024-11-13T18:37:58,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table520 2024-11-13T18:37:58,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table520) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,050 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table762 2024-11-13T18:37:58,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,051 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,051 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table762) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,051 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1022 2024-11-13T18:37:58,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,051 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,051 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1022) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,051 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1264 2024-11-13T18:37:58,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,052 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,052 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,052 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table521 2024-11-13T18:37:58,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,053 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,053 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table521) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,053 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table763 2024-11-13T18:37:58,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,053 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,053 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table763) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,053 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1023 2024-11-13T18:37:58,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,054 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,054 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1023) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,054 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1265 2024-11-13T18:37:58,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,055 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,055 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,055 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table522 2024-11-13T18:37:58,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,055 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,055 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table522) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,055 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table764 2024-11-13T18:37:58,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,056 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,056 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table764) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,056 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1024 2024-11-13T18:37:58,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,056 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,056 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1024) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,056 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1266 2024-11-13T18:37:58,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,057 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,057 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,057 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table523 2024-11-13T18:37:58,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,057 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,057 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table523) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,057 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table765 2024-11-13T18:37:58,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,059 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table765) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,059 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table524 2024-11-13T18:37:58,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,059 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table524) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,060 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table766 2024-11-13T18:37:58,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,060 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,060 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table766) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,060 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table525 2024-11-13T18:37:58,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,061 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,061 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table525) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,061 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table767 2024-11-13T18:37:58,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,061 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,061 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table767) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,061 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table526 2024-11-13T18:37:58,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,062 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,062 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table526) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,062 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table768 2024-11-13T18:37:58,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,062 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,062 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table768) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,062 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table527 2024-11-13T18:37:58,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,063 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table527) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,063 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table769 2024-11-13T18:37:58,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,063 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table769) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,063 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table528 2024-11-13T18:37:58,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,064 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,064 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table528) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,064 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table529 2024-11-13T18:37:58,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,064 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,064 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table529) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,064 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-13T18:37:58,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,065 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,065 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,065 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-13T18:37:58,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,065 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,066 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-13T18:37:58,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,066 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,066 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-13T18:37:58,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1025 2024-11-13T18:37:58,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,068 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1025) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,068 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1267 2024-11-13T18:37:58,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,068 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,068 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-13T18:37:58,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,069 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,069 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,069 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1026 2024-11-13T18:37:58,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,069 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,069 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1026) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,069 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1268 2024-11-13T18:37:58,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,070 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,070 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,070 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-13T18:37:58,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,070 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,070 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,071 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1027 2024-11-13T18:37:58,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,071 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1027) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,071 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1269 2024-11-13T18:37:58,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,072 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,072 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-13T18:37:58,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1028 2024-11-13T18:37:58,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,074 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1028) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-13T18:37:58,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1029 2024-11-13T18:37:58,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1029) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-13T18:37:58,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,076 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-13T18:37:58,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,076 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table990 2024-11-13T18:37:58,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,077 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table990) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1250 2024-11-13T18:37:58,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,077 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table991 2024-11-13T18:37:58,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,078 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,078 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table991) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1251 2024-11-13T18:37:58,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,078 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,078 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table750 2024-11-13T18:37:58,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,079 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,079 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table750) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,079 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table992 2024-11-13T18:37:58,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,079 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,079 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table992) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,079 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1010 2024-11-13T18:37:58,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,080 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,080 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1010) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1252 2024-11-13T18:37:58,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,080 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,080 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table751 2024-11-13T18:37:58,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table751) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table993 2024-11-13T18:37:58,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table993) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1011 2024-11-13T18:37:58,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1011) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1253 2024-11-13T18:37:58,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,082 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,082 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,082 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table510 2024-11-13T18:37:58,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,082 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,082 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table510) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,082 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table752 2024-11-13T18:37:58,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,083 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,083 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table752) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,083 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table994 2024-11-13T18:37:58,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,083 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,083 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table994) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,083 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1012 2024-11-13T18:37:58,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,084 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,084 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1012) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,084 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1254 2024-11-13T18:37:58,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,084 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,084 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,084 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table511 2024-11-13T18:37:58,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,085 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,085 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table511) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,085 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table753 2024-11-13T18:37:58,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,085 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,085 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table753) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,086 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table995 2024-11-13T18:37:58,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,086 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,086 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table995) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,086 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1013 2024-11-13T18:37:58,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,087 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,087 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1013) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,087 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1255 2024-11-13T18:37:58,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,087 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,087 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,087 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table512 2024-11-13T18:37:58,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,088 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,088 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table512) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,088 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table754 2024-11-13T18:37:58,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,088 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,088 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table754) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,088 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table996 2024-11-13T18:37:58,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,089 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table996) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,089 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table513 2024-11-13T18:37:58,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,089 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table513) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,089 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table755 2024-11-13T18:37:58,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,090 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,090 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table755) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,090 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table997 2024-11-13T18:37:58,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,090 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,090 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table997) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,090 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table514 2024-11-13T18:37:58,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table514) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table756 2024-11-13T18:37:58,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table756) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,092 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table998 2024-11-13T18:37:58,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table998) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table515 2024-11-13T18:37:58,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table515) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table757 2024-11-13T18:37:58,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table757) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table999 2024-11-13T18:37:58,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,094 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table999) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,094 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table516 2024-11-13T18:37:58,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,094 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table516) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,094 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table758 2024-11-13T18:37:58,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table758) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table517 2024-11-13T18:37:58,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table517) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table759 2024-11-13T18:37:58,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table759) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table518 2024-11-13T18:37:58,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,098 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,098 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table518) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,098 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table519 2024-11-13T18:37:58,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,098 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,098 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table519) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,098 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-13T18:37:58,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,099 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-13T18:37:58,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,099 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-13T18:37:58,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,100 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1014 2024-11-13T18:37:58,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,100 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1014) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1256 2024-11-13T18:37:58,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-13T18:37:58,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1015 2024-11-13T18:37:58,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,102 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1015) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,102 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1257 2024-11-13T18:37:58,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,102 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,102 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-13T18:37:58,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,103 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1016 2024-11-13T18:37:58,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,103 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1016) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1258 2024-11-13T18:37:58,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-13T18:37:58,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1017 2024-11-13T18:37:58,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1017) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1259 2024-11-13T18:37:58,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-13T18:37:58,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,106 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1018 2024-11-13T18:37:58,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,106 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,106 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1018) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,106 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-13T18:37:58,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,106 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,106 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,106 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1019 2024-11-13T18:37:58,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,107 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1019) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,107 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-13T18:37:58,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,107 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,107 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-13T18:37:58,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-13T18:37:58,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table780 2024-11-13T18:37:58,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table780) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table781 2024-11-13T18:37:58,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table781) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table540 2024-11-13T18:37:58,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table540) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,110 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table782 2024-11-13T18:37:58,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table782) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,110 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1240 2024-11-13T18:37:58,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table541 2024-11-13T18:37:58,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table541) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table783 2024-11-13T18:37:58,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table783) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1241 2024-11-13T18:37:58,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table300 2024-11-13T18:37:58,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table542 2024-11-13T18:37:58,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table542) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,114 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table784 2024-11-13T18:37:58,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,114 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,114 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table784) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,114 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1000 2024-11-13T18:37:58,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,114 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,114 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1000) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,114 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1242 2024-11-13T18:37:58,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,115 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table301 2024-11-13T18:37:58,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,115 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table543 2024-11-13T18:37:58,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table543) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table785 2024-11-13T18:37:58,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table785) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1001 2024-11-13T18:37:58,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1001) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1243 2024-11-13T18:37:58,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table302 2024-11-13T18:37:58,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,118 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table544 2024-11-13T18:37:58,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table544) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,118 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table786 2024-11-13T18:37:58,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table786) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1002 2024-11-13T18:37:58,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1002) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1244 2024-11-13T18:37:58,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table303 2024-11-13T18:37:58,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,120 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table545 2024-11-13T18:37:58,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,120 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table545) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table787 2024-11-13T18:37:58,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table787) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table304 2024-11-13T18:37:58,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table546 2024-11-13T18:37:58,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,122 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table546) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table788 2024-11-13T18:37:58,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,122 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table788) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table305 2024-11-13T18:37:58,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,123 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table547 2024-11-13T18:37:58,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table547) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table789 2024-11-13T18:37:58,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,124 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table789) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table306 2024-11-13T18:37:58,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table548 2024-11-13T18:37:58,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table548) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table307 2024-11-13T18:37:58,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table549 2024-11-13T18:37:58,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table549) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table308 2024-11-13T18:37:58,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table309 2024-11-13T18:37:58,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,127 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-13T18:37:58,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,127 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,128 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-13T18:37:58,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,128 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,128 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,128 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1003 2024-11-13T18:37:58,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,128 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,128 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1003) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1245 2024-11-13T18:37:58,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-13T18:37:58,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1004 2024-11-13T18:37:58,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1004) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1246 2024-11-13T18:37:58,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-13T18:37:58,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1005 2024-11-13T18:37:58,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1005) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1247 2024-11-13T18:37:58,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-13T18:37:58,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,133 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1006 2024-11-13T18:37:58,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,133 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1006) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1248 2024-11-13T18:37:58,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,133 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-13T18:37:58,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1007 2024-11-13T18:37:58,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1007) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1249 2024-11-13T18:37:58,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,135 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,135 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-13T18:37:58,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,135 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,135 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1008 2024-11-13T18:37:58,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,136 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,136 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1008) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,136 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-13T18:37:58,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,137 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,137 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1009 2024-11-13T18:37:58,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,137 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1009) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,137 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-13T18:37:58,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,138 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,138 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,138 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-13T18:37:58,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,138 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,138 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,138 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-13T18:37:58,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table770 2024-11-13T18:37:58,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table770) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table771 2024-11-13T18:37:58,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table771) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table530 2024-11-13T18:37:58,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,140 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,140 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table530) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,140 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table772 2024-11-13T18:37:58,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,140 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,140 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table772) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,140 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1230 2024-11-13T18:37:58,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table531 2024-11-13T18:37:58,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table531) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table773 2024-11-13T18:37:58,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,142 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table773) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1231 2024-11-13T18:37:58,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,142 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table532 2024-11-13T18:37:58,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table532) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table774 2024-11-13T18:37:58,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table774) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1232 2024-11-13T18:37:58,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table533 2024-11-13T18:37:58,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table533) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table775 2024-11-13T18:37:58,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table775) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1233 2024-11-13T18:37:58,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table534 2024-11-13T18:37:58,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,146 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,146 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table534) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,146 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table776 2024-11-13T18:37:58,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,146 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,146 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table776) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,146 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table535 2024-11-13T18:37:58,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,146 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,147 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table535) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,147 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table777 2024-11-13T18:37:58,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,147 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,147 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table777) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,147 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table536 2024-11-13T18:37:58,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table536) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table778 2024-11-13T18:37:58,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table778) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table537 2024-11-13T18:37:58,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table537) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table779 2024-11-13T18:37:58,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table779) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table538 2024-11-13T18:37:58,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table538) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table539 2024-11-13T18:37:58,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table539) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-13T18:37:58,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1234 2024-11-13T18:37:58,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,151 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-13T18:37:58,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,151 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1235 2024-11-13T18:37:58,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-13T18:37:58,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1236 2024-11-13T18:37:58,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,153 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-13T18:37:58,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,153 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1237 2024-11-13T18:37:58,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-13T18:37:58,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1238 2024-11-13T18:37:58,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-13T18:37:58,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1239 2024-11-13T18:37:58,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-13T18:37:58,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-13T18:37:58,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,157 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1061 2024-11-13T18:37:58,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1061) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,157 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1062 2024-11-13T18:37:58,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1062) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1063 2024-11-13T18:37:58,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1063) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1064 2024-11-13T18:37:58,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,159 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1064) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,159 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1065 2024-11-13T18:37:58,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,159 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1065) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,159 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table960 2024-11-13T18:37:58,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,160 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,160 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table960) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,160 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1066 2024-11-13T18:37:58,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,160 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,161 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1066) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,161 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table961 2024-11-13T18:37:58,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,161 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,161 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table961) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,161 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1067 2024-11-13T18:37:58,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,161 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,161 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1067) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table720 2024-11-13T18:37:58,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table720) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table962 2024-11-13T18:37:58,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table962) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1068 2024-11-13T18:37:58,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,163 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1068) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,163 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table721 2024-11-13T18:37:58,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,163 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table721) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,163 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table963 2024-11-13T18:37:58,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,164 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table963) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table722 2024-11-13T18:37:58,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,164 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table722) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table964 2024-11-13T18:37:58,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table964) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,165 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table723 2024-11-13T18:37:58,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table723) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,165 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table965 2024-11-13T18:37:58,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,166 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table965) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table724 2024-11-13T18:37:58,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,166 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table724) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table966 2024-11-13T18:37:58,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,166 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table966) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table725 2024-11-13T18:37:58,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,167 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,167 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table725) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,167 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table967 2024-11-13T18:37:58,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,167 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,167 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table967) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,167 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table726 2024-11-13T18:37:58,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table726) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table968 2024-11-13T18:37:58,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table968) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table727 2024-11-13T18:37:58,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table727) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,169 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table969 2024-11-13T18:37:58,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table969) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,169 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table728 2024-11-13T18:37:58,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table728) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,170 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1060 2024-11-13T18:37:58,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,170 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1060) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,170 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table729 2024-11-13T18:37:58,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table729) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1069 2024-11-13T18:37:58,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1069) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1050 2024-11-13T18:37:58,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1050) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1292 2024-11-13T18:37:58,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1051 2024-11-13T18:37:58,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1051) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,173 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1293 2024-11-13T18:37:58,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,173 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1052 2024-11-13T18:37:58,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1052) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,174 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1294 2024-11-13T18:37:58,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,174 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,174 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,174 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1053 2024-11-13T18:37:58,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,174 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,174 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1053) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,174 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1295 2024-11-13T18:37:58,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,175 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,175 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1054 2024-11-13T18:37:58,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,175 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1054) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,175 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1296 2024-11-13T18:37:58,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,176 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,176 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,176 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1055 2024-11-13T18:37:58,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,178 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,178 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1055) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,178 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1297 2024-11-13T18:37:58,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,178 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,178 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,178 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table950 2024-11-13T18:37:58,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,179 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,179 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table950) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,179 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1056 2024-11-13T18:37:58,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,179 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,179 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1056) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,179 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1298 2024-11-13T18:37:58,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,180 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,180 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,180 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table951 2024-11-13T18:37:58,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,180 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,180 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table951) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,180 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1057 2024-11-13T18:37:58,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,181 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,181 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1057) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,181 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1299 2024-11-13T18:37:58,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,181 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,181 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,181 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table710 2024-11-13T18:37:58,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,181 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,182 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table710) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,182 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table952 2024-11-13T18:37:58,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,182 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,182 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table952) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,182 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table711 2024-11-13T18:37:58,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,182 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,182 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table711) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,182 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table953 2024-11-13T18:37:58,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,183 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,183 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table953) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,183 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table712 2024-11-13T18:37:58,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,183 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,183 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table712) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,184 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table954 2024-11-13T18:37:58,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,184 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,184 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table954) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,184 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table713 2024-11-13T18:37:58,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,184 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,184 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table713) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,184 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table955 2024-11-13T18:37:58,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,185 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,185 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table955) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,185 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table714 2024-11-13T18:37:58,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,185 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,185 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table714) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,185 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table956 2024-11-13T18:37:58,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,186 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,186 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table956) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,186 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table715 2024-11-13T18:37:58,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,186 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,186 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table715) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,186 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table957 2024-11-13T18:37:58,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,187 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,187 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table957) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,187 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table716 2024-11-13T18:37:58,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,187 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,187 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table716) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,187 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table958 2024-11-13T18:37:58,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,188 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,188 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table958) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,188 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1290 2024-11-13T18:37:58,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,188 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,188 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,188 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table717 2024-11-13T18:37:58,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,188 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,189 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table717) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,189 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table959 2024-11-13T18:37:58,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,189 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,189 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table959) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,189 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1291 2024-11-13T18:37:58,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,189 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,189 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,189 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table718 2024-11-13T18:37:58,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,190 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table718) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,190 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table719 2024-11-13T18:37:58,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,190 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table719) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,190 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1058 2024-11-13T18:37:58,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1058) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,191 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1059 2024-11-13T18:37:58,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1059) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,191 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1281 2024-11-13T18:37:58,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,192 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,192 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,192 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1040 2024-11-13T18:37:58,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,192 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,192 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1040) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,192 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1282 2024-11-13T18:37:58,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,193 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,193 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1041 2024-11-13T18:37:58,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,193 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1041) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,193 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1283 2024-11-13T18:37:58,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,194 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,194 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,194 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table980 2024-11-13T18:37:58,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,194 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,194 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table980) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,194 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1042 2024-11-13T18:37:58,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,195 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,195 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1042) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,195 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1284 2024-11-13T18:37:58,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,195 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,195 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,195 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table981 2024-11-13T18:37:58,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,196 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,196 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table981) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,196 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1043 2024-11-13T18:37:58,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,196 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,196 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1043) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,196 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1285 2024-11-13T18:37:58,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,197 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,197 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,197 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table740 2024-11-13T18:37:58,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,197 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,197 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table740) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,197 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table982 2024-11-13T18:37:58,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,198 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,198 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table982) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,198 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1044 2024-11-13T18:37:58,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,198 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,198 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1044) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,198 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1286 2024-11-13T18:37:58,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,198 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,199 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,199 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table741 2024-11-13T18:37:58,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,199 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,199 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table741) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,199 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table983 2024-11-13T18:37:58,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,199 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,199 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table983) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,199 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1045 2024-11-13T18:37:58,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1045) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1287 2024-11-13T18:37:58,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table500 2024-11-13T18:37:58,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,201 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table500) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table742 2024-11-13T18:37:58,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,201 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table742) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table984 2024-11-13T18:37:58,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,202 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table984) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,202 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1046 2024-11-13T18:37:58,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,202 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1046) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,202 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1288 2024-11-13T18:37:58,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table501 2024-11-13T18:37:58,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table501) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table743 2024-11-13T18:37:58,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,204 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,204 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table743) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,204 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table985 2024-11-13T18:37:58,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,204 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,204 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table985) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,204 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table502 2024-11-13T18:37:58,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table502) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table744 2024-11-13T18:37:58,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table744) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table986 2024-11-13T18:37:58,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table986) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,206 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table503 2024-11-13T18:37:58,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,206 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,206 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table503) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,206 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table745 2024-11-13T18:37:58,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,207 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,207 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table745) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,207 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table987 2024-11-13T18:37:58,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,207 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,207 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table987) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,207 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table504 2024-11-13T18:37:58,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,208 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,208 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table504) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,208 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table746 2024-11-13T18:37:58,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,208 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,208 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table746) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,208 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table988 2024-11-13T18:37:58,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table988) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table505 2024-11-13T18:37:58,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table505) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table747 2024-11-13T18:37:58,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table747) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table989 2024-11-13T18:37:58,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table989) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table506 2024-11-13T18:37:58,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,211 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table506) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,211 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table748 2024-11-13T18:37:58,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,211 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table748) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,211 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table507 2024-11-13T18:37:58,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,211 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table507) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table749 2024-11-13T18:37:58,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table749) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table508 2024-11-13T18:37:58,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table508) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1280 2024-11-13T18:37:58,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,213 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,213 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,213 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table509 2024-11-13T18:37:58,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,213 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,213 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table509) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,213 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1047 2024-11-13T18:37:58,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1047) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1289 2024-11-13T18:37:58,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1048 2024-11-13T18:37:58,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,215 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1048) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,215 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1049 2024-11-13T18:37:58,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,215 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1049) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,215 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1270 2024-11-13T18:37:58,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,216 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,216 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,216 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1271 2024-11-13T18:37:58,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,216 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,216 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,216 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1030 2024-11-13T18:37:58,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,217 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1030) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1272 2024-11-13T18:37:58,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,217 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1031 2024-11-13T18:37:58,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,218 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1031) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,218 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1273 2024-11-13T18:37:58,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,218 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,218 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table970 2024-11-13T18:37:58,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table970) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1032 2024-11-13T18:37:58,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1032) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1274 2024-11-13T18:37:58,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table971 2024-11-13T18:37:58,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table971) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1033 2024-11-13T18:37:58,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1033) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1275 2024-11-13T18:37:58,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table730 2024-11-13T18:37:58,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table730) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table972 2024-11-13T18:37:58,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table972) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1034 2024-11-13T18:37:58,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1034) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1276 2024-11-13T18:37:58,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table731 2024-11-13T18:37:58,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table731) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table973 2024-11-13T18:37:58,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table973) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1035 2024-11-13T18:37:58,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1035) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1277 2024-11-13T18:37:58,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table732 2024-11-13T18:37:58,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table732) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,225 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table974 2024-11-13T18:37:58,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table974) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,225 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table733 2024-11-13T18:37:58,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,226 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table733) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,226 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table975 2024-11-13T18:37:58,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,226 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,226 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table975) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,226 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table734 2024-11-13T18:37:58,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,226 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,226 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table734) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,226 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table976 2024-11-13T18:37:58,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,227 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table976) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,227 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table735 2024-11-13T18:37:58,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,227 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table735) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,227 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table977 2024-11-13T18:37:58,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,228 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,228 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table977) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,228 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table736 2024-11-13T18:37:58,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,228 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,228 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table736) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,228 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table978 2024-11-13T18:37:58,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,229 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,229 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table978) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,229 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table737 2024-11-13T18:37:58,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,229 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,229 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table737) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,229 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table979 2024-11-13T18:37:58,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,230 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,230 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table979) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,230 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table738 2024-11-13T18:37:58,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,230 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,230 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table738) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,230 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table739 2024-11-13T18:37:58,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,230 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table739) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,231 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1036 2024-11-13T18:37:58,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1036) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,231 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1278 2024-11-13T18:37:58,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,231 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1037 2024-11-13T18:37:58,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1037) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,232 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1279 2024-11-13T18:37:58,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,232 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1038 2024-11-13T18:37:58,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,233 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1038) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,233 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1039 2024-11-13T18:37:58,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,233 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1039) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,233 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-13T18:37:58,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,234 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,234 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,234 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table361 2024-11-13T18:37:58,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,234 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,234 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,234 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-13T18:37:58,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,235 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,235 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,235 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-13T18:37:58,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,235 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,235 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,235 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table362 2024-11-13T18:37:58,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,236 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,236 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-13T18:37:58,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,236 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,236 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-13T18:37:58,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,237 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,237 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,237 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table363 2024-11-13T18:37:58,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,237 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,237 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,237 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-13T18:37:58,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,238 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,238 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,238 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-13T18:37:58,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,238 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,238 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,238 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table364 2024-11-13T18:37:58,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,239 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-13T18:37:58,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,239 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-13T18:37:58,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,240 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,240 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,240 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table365 2024-11-13T18:37:58,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,240 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,240 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,241 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-13T18:37:58,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,241 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,241 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,241 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-13T18:37:58,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,242 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,242 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,242 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table366 2024-11-13T18:37:58,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,242 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,242 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,242 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-13T18:37:58,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,244 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,244 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-13T18:37:58,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,245 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,245 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,245 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table367 2024-11-13T18:37:58,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,245 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,245 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,245 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-13T18:37:58,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,246 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,246 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,246 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table368 2024-11-13T18:37:58,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,246 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,246 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,246 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1420 2024-11-13T18:37:58,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,247 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,247 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,247 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-13T18:37:58,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,247 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,247 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,247 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table369 2024-11-13T18:37:58,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,248 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,248 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,248 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-13T18:37:58,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,248 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,248 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,248 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-13T18:37:58,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,249 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,249 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,249 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1429 2024-11-13T18:37:58,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,249 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,249 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,249 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1421 2024-11-13T18:37:58,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,250 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,250 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,250 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1422 2024-11-13T18:37:58,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,250 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,250 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,250 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1423 2024-11-13T18:37:58,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,251 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,251 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,251 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1424 2024-11-13T18:37:58,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,251 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,251 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,251 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1425 2024-11-13T18:37:58,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,251 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,252 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,252 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-13T18:37:58,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,252 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,252 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,252 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1426 2024-11-13T18:37:58,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-13T18:37:58,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,254 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table370 2024-11-13T18:37:58,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,254 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,254 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,254 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1427 2024-11-13T18:37:58,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-13T18:37:58,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table371 2024-11-13T18:37:58,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,256 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1428 2024-11-13T18:37:58,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table350 2024-11-13T18:37:58,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table592 2024-11-13T18:37:58,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table592) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table351 2024-11-13T18:37:58,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table593 2024-11-13T18:37:58,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table593) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,259 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-13T18:37:58,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,259 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table352 2024-11-13T18:37:58,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,260 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,260 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,260 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table594 2024-11-13T18:37:58,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,260 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,261 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table594) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,261 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-13T18:37:58,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,261 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,261 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,261 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table353 2024-11-13T18:37:58,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table595 2024-11-13T18:37:58,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table595) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-13T18:37:58,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table354 2024-11-13T18:37:58,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table596 2024-11-13T18:37:58,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,264 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table596) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,264 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-13T18:37:58,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,264 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,264 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table355 2024-11-13T18:37:58,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,265 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,265 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table597 2024-11-13T18:37:58,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table597) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-13T18:37:58,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,266 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table356 2024-11-13T18:37:58,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,267 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,267 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table598 2024-11-13T18:37:58,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,267 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table598) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,267 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-13T18:37:58,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table357 2024-11-13T18:37:58,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,269 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table599 2024-11-13T18:37:58,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,269 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,269 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table599) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,269 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-13T18:37:58,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,269 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table358 2024-11-13T18:37:58,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,270 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-13T18:37:58,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,271 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table359 2024-11-13T18:37:58,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,271 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-13T18:37:58,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-13T18:37:58,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1418 2024-11-13T18:37:58,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,273 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1419 2024-11-13T18:37:58,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,273 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1410 2024-11-13T18:37:58,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1411 2024-11-13T18:37:58,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1412 2024-11-13T18:37:58,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,275 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1413 2024-11-13T18:37:58,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,275 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,275 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,275 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1414 2024-11-13T18:37:58,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,275 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,275 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,276 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1415 2024-11-13T18:37:58,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,276 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,276 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,276 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1416 2024-11-13T18:37:58,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,276 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,276 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,276 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table360 2024-11-13T18:37:58,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1417 2024-11-13T18:37:58,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table141 2024-11-13T18:37:58,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,279 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table383 2024-11-13T18:37:58,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,280 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,280 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,280 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table142 2024-11-13T18:37:58,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table384 2024-11-13T18:37:58,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table143 2024-11-13T18:37:58,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,282 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,282 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,282 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table385 2024-11-13T18:37:58,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,283 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,283 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,284 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table144 2024-11-13T18:37:58,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,284 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,284 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,284 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table386 2024-11-13T18:37:58,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,285 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,285 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table145 2024-11-13T18:37:58,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,285 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,285 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table387 2024-11-13T18:37:58,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,286 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,286 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,286 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table146 2024-11-13T18:37:58,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,286 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,286 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,286 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table388 2024-11-13T18:37:58,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,287 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,287 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,287 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table147 2024-11-13T18:37:58,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,287 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,287 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,287 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table389 2024-11-13T18:37:58,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,288 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,288 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,288 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table148 2024-11-13T18:37:58,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,288 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,288 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,288 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table149 2024-11-13T18:37:58,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,289 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:37:58,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,289 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1407 2024-11-13T18:37:58,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,290 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,290 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,290 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:37:58,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,290 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,290 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,291 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1408 2024-11-13T18:37:58,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,291 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,291 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,291 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:37:58,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,291 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,292 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,292 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1409 2024-11-13T18:37:58,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,292 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,292 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,292 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:37:58,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,293 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,293 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:37:58,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,293 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,293 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:58,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,294 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,294 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1400 2024-11-13T18:37:58,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,294 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,294 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1401 2024-11-13T18:37:58,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,295 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,295 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,295 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1402 2024-11-13T18:37:58,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,295 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,296 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,296 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-13T18:37:58,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,296 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,297 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,297 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table390 2024-11-13T18:37:58,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,297 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,297 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,297 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1403 2024-11-13T18:37:58,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,298 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,298 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,298 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-13T18:37:58,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,298 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,298 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table391 2024-11-13T18:37:58,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,299 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,299 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1404 2024-11-13T18:37:58,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,299 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-13T18:37:58,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table150 2024-11-13T18:37:58,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,301 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,301 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,301 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table392 2024-11-13T18:37:58,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,301 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,301 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,301 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1405 2024-11-13T18:37:58,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,302 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,302 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,302 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-13T18:37:58,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,302 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,302 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table151 2024-11-13T18:37:58,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table393 2024-11-13T18:37:58,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1406 2024-11-13T18:37:58,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,304 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,304 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table130 2024-11-13T18:37:58,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table372 2024-11-13T18:37:58,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table131 2024-11-13T18:37:58,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,306 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,306 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,306 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table373 2024-11-13T18:37:58,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,307 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,307 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,307 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table132 2024-11-13T18:37:58,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,307 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,307 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,307 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table374 2024-11-13T18:37:58,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,308 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,308 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,308 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table133 2024-11-13T18:37:58,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,308 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,308 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,308 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table375 2024-11-13T18:37:58,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,309 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,309 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table134 2024-11-13T18:37:58,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,309 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,309 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table376 2024-11-13T18:37:58,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,310 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,310 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,310 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table135 2024-11-13T18:37:58,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,311 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table377 2024-11-13T18:37:58,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,311 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table136 2024-11-13T18:37:58,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,312 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,312 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,312 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table378 2024-11-13T18:37:58,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,312 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,312 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,312 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table137 2024-11-13T18:37:58,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,313 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,313 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,313 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table379 2024-11-13T18:37:58,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,313 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,313 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,313 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table138 2024-11-13T18:37:58,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,314 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,314 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,314 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table139 2024-11-13T18:37:58,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,314 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,314 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,314 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table380 2024-11-13T18:37:58,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,315 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,315 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,315 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table381 2024-11-13T18:37:58,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,315 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,315 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,315 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table140 2024-11-13T18:37:58,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,316 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,316 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table382 2024-11-13T18:37:58,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,316 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,316 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-13T18:37:58,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,317 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,317 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,317 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-13T18:37:58,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,318 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,318 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,318 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table560 2024-11-13T18:37:58,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,319 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,319 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table560) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,319 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-13T18:37:58,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,319 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,319 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,319 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table561 2024-11-13T18:37:58,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,320 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table561) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table320 2024-11-13T18:37:58,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,320 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table562 2024-11-13T18:37:58,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table562) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table321 2024-11-13T18:37:58,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,322 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,322 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table563 2024-11-13T18:37:58,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,322 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,322 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table563) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,322 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table322 2024-11-13T18:37:58,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,322 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,322 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,323 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table564 2024-11-13T18:37:58,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table564) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,323 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1220 2024-11-13T18:37:58,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,324 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table323 2024-11-13T18:37:58,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,324 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,324 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,324 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table565 2024-11-13T18:37:58,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table565) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1221 2024-11-13T18:37:58,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table324 2024-11-13T18:37:58,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,326 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,326 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,326 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table566 2024-11-13T18:37:58,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,326 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,326 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table566) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,326 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1222 2024-11-13T18:37:58,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table325 2024-11-13T18:37:58,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table567 2024-11-13T18:37:58,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,328 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,328 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table567) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,328 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table326 2024-11-13T18:37:58,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,328 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,328 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,328 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table568 2024-11-13T18:37:58,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,329 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table568) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,329 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table327 2024-11-13T18:37:58,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,329 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,329 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table569 2024-11-13T18:37:58,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table569) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table328 2024-11-13T18:37:58,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table329 2024-11-13T18:37:58,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,331 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,331 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1223 2024-11-13T18:37:58,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,331 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,331 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-13T18:37:58,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,332 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,332 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,332 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1224 2024-11-13T18:37:58,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,332 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,332 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,332 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-13T18:37:58,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,333 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,333 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,333 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1225 2024-11-13T18:37:58,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,333 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,333 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,333 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-13T18:37:58,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,334 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,334 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,334 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1226 2024-11-13T18:37:58,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,334 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,334 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,334 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-13T18:37:58,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,335 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,335 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,335 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1227 2024-11-13T18:37:58,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,335 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,335 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,335 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-13T18:37:58,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,336 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,336 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,336 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1228 2024-11-13T18:37:58,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,336 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,337 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-13T18:37:58,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,337 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,337 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1229 2024-11-13T18:37:58,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,337 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,337 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-13T18:37:58,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,338 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,338 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,338 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-13T18:37:58,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,338 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,338 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,338 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table790 2024-11-13T18:37:58,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table790) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,339 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-13T18:37:58,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,339 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table791 2024-11-13T18:37:58,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table791) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,340 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-13T18:37:58,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table550 2024-11-13T18:37:58,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table550) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table792 2024-11-13T18:37:58,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table792) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-13T18:37:58,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,342 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,342 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,342 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table551 2024-11-13T18:37:58,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,342 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,342 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table551) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,342 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table793 2024-11-13T18:37:58,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,343 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,343 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table793) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,343 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table310 2024-11-13T18:37:58,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,343 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,343 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,343 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table552 2024-11-13T18:37:58,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,344 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,344 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table552) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,344 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table794 2024-11-13T18:37:58,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,344 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,344 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table794) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,344 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table311 2024-11-13T18:37:58,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,345 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,345 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,345 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table553 2024-11-13T18:37:58,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,345 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,345 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table553) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,345 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table795 2024-11-13T18:37:58,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,346 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,346 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table795) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,346 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table312 2024-11-13T18:37:58,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,346 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,346 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,346 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table554 2024-11-13T18:37:58,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,347 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table554) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,347 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table796 2024-11-13T18:37:58,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,347 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table796) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,347 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1210 2024-11-13T18:37:58,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,348 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table313 2024-11-13T18:37:58,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,349 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,349 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,349 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table555 2024-11-13T18:37:58,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,349 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,349 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table555) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,349 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table797 2024-11-13T18:37:58,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,350 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,350 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table797) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,350 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1211 2024-11-13T18:37:58,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,350 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,350 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,350 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table314 2024-11-13T18:37:58,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table556 2024-11-13T18:37:58,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table556) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table798 2024-11-13T18:37:58,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,352 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table798) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,352 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table315 2024-11-13T18:37:58,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,352 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,352 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,352 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table557 2024-11-13T18:37:58,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,352 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,352 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table557) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,353 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table799 2024-11-13T18:37:58,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,353 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,353 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table799) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,353 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table316 2024-11-13T18:37:58,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,353 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,353 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,353 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table558 2024-11-13T18:37:58,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,354 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,354 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table558) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,354 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table317 2024-11-13T18:37:58,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,354 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,354 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,354 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table559 2024-11-13T18:37:58,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,355 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,355 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table559) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,355 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table318 2024-11-13T18:37:58,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,355 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,355 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,355 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table319 2024-11-13T18:37:58,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,356 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,356 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,356 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1212 2024-11-13T18:37:58,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,356 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,356 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,356 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1213 2024-11-13T18:37:58,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,357 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,357 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,357 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-13T18:37:58,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,357 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,357 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,357 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1214 2024-11-13T18:37:58,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,358 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,358 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,358 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-13T18:37:58,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,358 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,358 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,358 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1215 2024-11-13T18:37:58,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,359 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,359 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,359 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-13T18:37:58,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,359 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,359 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,359 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1216 2024-11-13T18:37:58,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,360 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,360 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,360 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-13T18:37:58,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,361 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,361 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,361 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1217 2024-11-13T18:37:58,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,361 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,361 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,361 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-13T18:37:58,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,362 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,362 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,362 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1218 2024-11-13T18:37:58,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,362 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,362 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,362 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-13T18:37:58,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,363 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,363 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,363 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1219 2024-11-13T18:37:58,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,363 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,363 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,363 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-13T18:37:58,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,364 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,364 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,364 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table581 2024-11-13T18:37:58,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,364 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,364 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table581) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,364 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-13T18:37:58,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,364 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,365 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,365 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table340 2024-11-13T18:37:58,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,365 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,365 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,365 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table582 2024-11-13T18:37:58,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,365 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,365 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table582) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,365 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-13T18:37:58,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,366 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,366 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,366 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table341 2024-11-13T18:37:58,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,366 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,366 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,366 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table583 2024-11-13T18:37:58,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,367 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,367 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table583) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,367 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-13T18:37:58,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,367 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,367 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,367 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-13T18:37:58,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,368 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,368 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,368 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table342 2024-11-13T18:37:58,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,368 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,368 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,368 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table584 2024-11-13T18:37:58,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table584) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,369 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-13T18:37:58,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,369 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-13T18:37:58,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,369 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table343 2024-11-13T18:37:58,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,370 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,370 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table585 2024-11-13T18:37:58,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,370 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table585) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,370 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-13T18:37:58,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,371 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,371 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,371 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table344 2024-11-13T18:37:58,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,371 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,371 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,371 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table586 2024-11-13T18:37:58,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,372 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,372 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table586) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,372 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-13T18:37:58,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,372 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,372 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,372 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table345 2024-11-13T18:37:58,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,373 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,373 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,373 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table587 2024-11-13T18:37:58,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,373 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,373 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table587) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,373 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-13T18:37:58,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,374 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,374 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,374 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table346 2024-11-13T18:37:58,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,374 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,374 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,374 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table588 2024-11-13T18:37:58,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,375 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,375 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table588) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,375 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1200 2024-11-13T18:37:58,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,375 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,375 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,375 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-13T18:37:58,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,376 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,376 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,376 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table347 2024-11-13T18:37:58,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,376 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,376 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,376 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table589 2024-11-13T18:37:58,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,377 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,377 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table589) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,377 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-13T18:37:58,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,377 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,377 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,377 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table348 2024-11-13T18:37:58,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,378 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,378 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,378 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-13T18:37:58,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,378 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,378 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,378 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table349 2024-11-13T18:37:58,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,379 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,379 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,382 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-13T18:37:58,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,383 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,383 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-13T18:37:58,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,383 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,383 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1209 2024-11-13T18:37:58,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,384 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,384 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,384 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1201 2024-11-13T18:37:58,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,384 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,384 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,384 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1202 2024-11-13T18:37:58,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,384 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,384 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,385 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1203 2024-11-13T18:37:58,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,385 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,385 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,385 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-13T18:37:58,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,385 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,386 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,386 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1204 2024-11-13T18:37:58,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,386 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,386 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,386 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-13T18:37:58,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,386 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,387 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,387 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1205 2024-11-13T18:37:58,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,387 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,387 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,387 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-13T18:37:58,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,388 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,396 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,396 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1206 2024-11-13T18:37:58,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,396 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,396 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,396 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-13T18:37:58,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,397 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,397 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,397 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table590 2024-11-13T18:37:58,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,397 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,397 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table590) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,397 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1207 2024-11-13T18:37:58,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,398 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,398 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,398 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-13T18:37:58,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,398 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,398 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,398 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table591 2024-11-13T18:37:58,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,399 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,399 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table591) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,399 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1208 2024-11-13T18:37:58,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,399 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,399 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,399 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-13T18:37:58,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,400 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,400 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,400 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table570 2024-11-13T18:37:58,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,400 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,400 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table570) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,400 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-13T18:37:58,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,401 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,401 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,401 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table571 2024-11-13T18:37:58,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,401 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,401 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table571) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,401 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-13T18:37:58,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,402 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,402 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,402 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table330 2024-11-13T18:37:58,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,402 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,402 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,402 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table572 2024-11-13T18:37:58,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,402 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,402 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table572) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,403 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-13T18:37:58,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,403 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,403 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,403 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table331 2024-11-13T18:37:58,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,403 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,403 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,403 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table573 2024-11-13T18:37:58,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,404 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,404 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table573) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,404 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-13T18:37:58,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,404 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,404 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,404 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table332 2024-11-13T18:37:58,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,405 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table574 2024-11-13T18:37:58,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table574) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,405 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-13T18:37:58,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,406 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,406 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,406 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table333 2024-11-13T18:37:58,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,406 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,406 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,406 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table575 2024-11-13T18:37:58,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,407 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,407 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table575) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,407 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table334 2024-11-13T18:37:58,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,407 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,407 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,407 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table576 2024-11-13T18:37:58,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,408 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,408 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table576) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,408 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1430 2024-11-13T18:37:58,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,408 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,408 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,408 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table335 2024-11-13T18:37:58,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,409 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,409 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,409 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table577 2024-11-13T18:37:58,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,409 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,409 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table577) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,409 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1431 2024-11-13T18:37:58,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,410 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,410 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,410 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table336 2024-11-13T18:37:58,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,410 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,410 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,410 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table578 2024-11-13T18:37:58,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,411 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table578) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,411 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table337 2024-11-13T18:37:58,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,411 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,411 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table579 2024-11-13T18:37:58,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,412 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,412 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table579) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,412 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table338 2024-11-13T18:37:58,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,412 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,412 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,412 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table339 2024-11-13T18:37:58,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,413 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,413 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-13T18:37:58,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,413 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,413 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-13T18:37:58,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,414 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,414 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-13T18:37:58,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,414 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,414 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,415 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-13T18:37:58,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,415 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,415 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,415 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table580 2024-11-13T18:37:58,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,415 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,415 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table580) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,415 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table284 2024-11-13T18:37:58,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,416 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,416 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,416 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table285 2024-11-13T18:37:58,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,417 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,417 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,417 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table286 2024-11-13T18:37:58,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,417 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,417 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,417 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table287 2024-11-13T18:37:58,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,418 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,418 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,418 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table288 2024-11-13T18:37:58,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,418 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,418 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,418 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table289 2024-11-13T18:37:58,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,419 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,419 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,419 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table290 2024-11-13T18:37:58,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,420 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,420 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table291 2024-11-13T18:37:58,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,421 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,421 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table292 2024-11-13T18:37:58,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,421 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,421 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,421 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table293 2024-11-13T18:37:58,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,421 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table294 2024-11-13T18:37:58,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,422 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table273 2024-11-13T18:37:58,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,422 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,423 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table274 2024-11-13T18:37:58,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,423 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,423 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table275 2024-11-13T18:37:58,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,423 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,424 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table276 2024-11-13T18:37:58,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,424 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,424 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,424 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table277 2024-11-13T18:37:58,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,424 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,424 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,425 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table278 2024-11-13T18:37:58,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,425 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,425 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,425 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table279 2024-11-13T18:37:58,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,425 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,425 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,425 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table280 2024-11-13T18:37:58,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,426 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,426 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,426 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table281 2024-11-13T18:37:58,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,426 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,426 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,426 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table282 2024-11-13T18:37:58,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,427 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,427 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,427 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table283 2024-11-13T18:37:58,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,427 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,427 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,427 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table295 2024-11-13T18:37:58,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,428 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,428 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,428 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table296 2024-11-13T18:37:58,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,428 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,428 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,428 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table297 2024-11-13T18:37:58,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,429 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,429 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,429 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table298 2024-11-13T18:37:58,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,429 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,429 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,429 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table299 2024-11-13T18:37:58,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,430 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,430 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,430 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table920 2024-11-13T18:37:58,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,430 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,430 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table920) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,430 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table921 2024-11-13T18:37:58,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,431 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,431 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table921) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,431 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table922 2024-11-13T18:37:58,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,431 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,431 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table922) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,431 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table923 2024-11-13T18:37:58,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,432 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,432 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table923) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,432 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table924 2024-11-13T18:37:58,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,432 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,432 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table924) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,433 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table925 2024-11-13T18:37:58,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,433 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,433 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table925) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,433 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table926 2024-11-13T18:37:58,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,434 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table926) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,434 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table927 2024-11-13T18:37:58,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,434 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table927) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,434 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table928 2024-11-13T18:37:58,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,435 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table928) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,435 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table929 2024-11-13T18:37:58,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,435 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table929) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,435 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1094 2024-11-13T18:37:58,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,435 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1094) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,436 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1095 2024-11-13T18:37:58,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,436 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,436 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1095) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,436 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1096 2024-11-13T18:37:58,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,436 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,436 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1096) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,436 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1097 2024-11-13T18:37:58,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,437 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,437 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1097) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,437 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1098 2024-11-13T18:37:58,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,437 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,437 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1098) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,437 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1099 2024-11-13T18:37:58,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,438 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,438 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1099) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,438 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table910 2024-11-13T18:37:58,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,438 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,438 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table910) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,438 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table911 2024-11-13T18:37:58,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,438 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,438 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table911) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,438 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table912 2024-11-13T18:37:58,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,439 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,439 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table912) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,439 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1090 2024-11-13T18:37:58,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,439 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,439 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1090) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,439 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table913 2024-11-13T18:37:58,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,440 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,440 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table913) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,440 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1091 2024-11-13T18:37:58,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,440 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,440 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1091) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,440 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table914 2024-11-13T18:37:58,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,441 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,441 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table914) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,441 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1092 2024-11-13T18:37:58,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,441 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,441 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1092) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,441 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table915 2024-11-13T18:37:58,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,441 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,441 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table915) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,441 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1093 2024-11-13T18:37:58,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,442 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,442 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1093) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,442 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table916 2024-11-13T18:37:58,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,442 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,442 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table916) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,442 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table917 2024-11-13T18:37:58,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,443 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,443 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table917) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,443 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table918 2024-11-13T18:37:58,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,443 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,443 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table918) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,443 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table919 2024-11-13T18:37:58,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,444 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,445 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table919) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,445 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1083 2024-11-13T18:37:58,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,446 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,446 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1083) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,446 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1084 2024-11-13T18:37:58,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,446 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,446 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1084) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,446 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1085 2024-11-13T18:37:58,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,447 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,447 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1085) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,447 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1086 2024-11-13T18:37:58,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,447 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,447 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1086) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,447 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1087 2024-11-13T18:37:58,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,447 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,447 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1087) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,448 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1088 2024-11-13T18:37:58,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,448 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,448 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1088) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,448 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1089 2024-11-13T18:37:58,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,448 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,448 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1089) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,448 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table940 2024-11-13T18:37:58,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,449 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,449 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table940) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,449 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table941 2024-11-13T18:37:58,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,449 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,449 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table941) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,449 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table700 2024-11-13T18:37:58,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,450 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,450 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table700) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,450 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table942 2024-11-13T18:37:58,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,450 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,450 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table942) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,450 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table701 2024-11-13T18:37:58,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,451 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,451 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table701) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,451 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table943 2024-11-13T18:37:58,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,451 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,451 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table943) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,451 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table702 2024-11-13T18:37:58,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,451 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,452 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table702) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,452 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table944 2024-11-13T18:37:58,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,452 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,452 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table944) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,452 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table703 2024-11-13T18:37:58,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,452 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,452 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table703) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,452 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table945 2024-11-13T18:37:58,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,453 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,453 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table945) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,453 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table704 2024-11-13T18:37:58,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,453 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,453 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table704) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,453 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table946 2024-11-13T18:37:58,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,454 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,454 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table946) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,454 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1080 2024-11-13T18:37:58,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,454 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,454 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1080) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,454 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table705 2024-11-13T18:37:58,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,455 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table705) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,455 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table947 2024-11-13T18:37:58,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,455 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table947) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,455 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1081 2024-11-13T18:37:58,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,456 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,456 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1081) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,456 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table706 2024-11-13T18:37:58,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,456 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,456 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table706) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,456 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table948 2024-11-13T18:37:58,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,457 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table948) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,457 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1082 2024-11-13T18:37:58,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,457 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1082) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,457 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table707 2024-11-13T18:37:58,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,458 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table707) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,458 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table949 2024-11-13T18:37:58,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,458 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,458 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table949) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,458 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table708 2024-11-13T18:37:58,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,458 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,458 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table708) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,458 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table709 2024-11-13T18:37:58,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,459 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,459 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table709) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,459 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1072 2024-11-13T18:37:58,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,459 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,459 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1072) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,459 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1073 2024-11-13T18:37:58,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,460 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,460 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1073) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,460 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1074 2024-11-13T18:37:58,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,460 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,460 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1074) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,460 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1075 2024-11-13T18:37:58,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,460 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,461 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1075) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,461 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1076 2024-11-13T18:37:58,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,461 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,461 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1076) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,461 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1077 2024-11-13T18:37:58,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,461 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,461 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1077) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,461 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1078 2024-11-13T18:37:58,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,462 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,462 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1078) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,462 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1079 2024-11-13T18:37:58,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,462 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,462 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1079) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,462 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table930 2024-11-13T18:37:58,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,463 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,463 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table930) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,463 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table931 2024-11-13T18:37:58,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,463 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,463 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table931) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,463 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table932 2024-11-13T18:37:58,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,463 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,463 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table932) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,463 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table933 2024-11-13T18:37:58,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,464 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,464 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table933) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,464 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table934 2024-11-13T18:37:58,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,464 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,464 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table934) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,464 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table935 2024-11-13T18:37:58,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,465 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,465 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table935) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,465 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table936 2024-11-13T18:37:58,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,465 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,465 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table936) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,466 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1070 2024-11-13T18:37:58,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,466 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,466 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1070) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,466 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table937 2024-11-13T18:37:58,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,466 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,466 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table937) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,466 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1071 2024-11-13T18:37:58,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,467 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,467 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1071) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,467 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table938 2024-11-13T18:37:58,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,467 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,467 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table938) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,467 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table939 2024-11-13T18:37:58,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,468 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,468 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table939) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,468 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table900 2024-11-13T18:37:58,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,468 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,468 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table900) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,469 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table901 2024-11-13T18:37:58,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,469 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,469 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table901) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,469 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table902 2024-11-13T18:37:58,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,469 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,469 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table902) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,469 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table903 2024-11-13T18:37:58,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,470 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,470 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table903) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,470 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table904 2024-11-13T18:37:58,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,470 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,470 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table904) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,470 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table905 2024-11-13T18:37:58,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,470 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,471 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table905) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,471 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table906 2024-11-13T18:37:58,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,471 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,471 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table906) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,471 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table907 2024-11-13T18:37:58,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,471 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,471 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table907) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,471 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table908 2024-11-13T18:37:58,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,472 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,472 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table908) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,472 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table909 2024-11-13T18:37:58,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1658653737=0, srv2069310312=1} racks are {rack=0} 2024-11-13T18:37:58,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,472 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,472 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table909) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,473 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-13T18:37:58,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,474 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,474 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,474 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-13T18:37:58,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,474 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,474 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,474 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-13T18:37:58,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,475 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,475 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,475 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-13T18:37:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,475 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,475 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,475 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-13T18:37:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,476 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,476 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,476 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-13T18:37:58,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,476 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,476 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,476 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-13T18:37:58,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,477 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,477 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,477 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-13T18:37:58,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,477 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,477 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,477 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-13T18:37:58,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,481 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,481 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,481 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-13T18:37:58,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,481 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,481 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,481 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-13T18:37:58,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,482 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,482 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,482 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-13T18:37:58,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,482 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,482 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,482 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-13T18:37:58,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,483 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,483 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,483 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-13T18:37:58,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,483 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,483 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,483 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-13T18:37:58,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,483 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,484 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,484 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-13T18:37:58,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,484 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,484 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,485 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-13T18:37:58,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,485 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,485 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,485 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-13T18:37:58,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,485 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,485 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,486 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-13T18:37:58,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,486 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,486 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,486 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-13T18:37:58,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,486 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,486 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,486 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-13T18:37:58,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,487 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,487 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,487 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-13T18:37:58,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,487 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,487 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,487 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-13T18:37:58,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,488 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,488 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,488 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-13T18:37:58,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,489 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,489 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,489 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-13T18:37:58,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,489 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,489 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,489 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-13T18:37:58,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,491 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,491 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,491 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-13T18:37:58,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,491 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,491 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,491 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-13T18:37:58,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,492 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,492 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,492 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:37:58,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,493 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,493 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,493 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:37:58,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,494 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,494 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,494 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:37:58,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,494 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,494 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,494 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:37:58,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,495 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,495 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,495 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:37:58,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,495 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,495 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,495 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:58,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,496 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,496 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,496 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-13T18:37:58,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,497 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,497 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-13T18:37:58,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,497 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,497 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-13T18:37:58,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,497 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,498 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-13T18:37:58,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,498 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,498 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,498 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-13T18:37:58,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,498 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,498 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,499 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-13T18:37:58,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,499 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,499 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,499 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-13T18:37:58,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,499 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,499 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,499 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-13T18:37:58,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,500 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,500 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,500 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-13T18:37:58,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,500 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,500 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,500 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-13T18:37:58,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,501 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,501 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,501 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-13T18:37:58,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,501 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,501 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,501 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-13T18:37:58,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,501 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,502 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,502 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-13T18:37:58,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,502 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,502 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,502 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-13T18:37:58,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,503 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,503 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,503 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-13T18:37:58,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,503 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,503 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,503 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-13T18:37:58,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,503 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,503 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,504 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-13T18:37:58,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,504 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,504 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,504 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-13T18:37:58,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,504 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,504 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,504 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-13T18:37:58,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1947956254=1, srv1244512685=0} racks are {rack=0} 2024-11-13T18:37:58,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:37:58,505 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,505 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,505 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:37:58,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv146427478=1, srv825583603=2, srv130681205=0} racks are {rack=0} 2024-11-13T18:37:58,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,506 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,506 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,506 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:58,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv146427478=1, srv825583603=2, srv130681205=0} racks are {rack=0} 2024-11-13T18:37:58,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,506 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,506 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,506 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:37:58,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1777528136=1, srv984797998=2, srv1173829649=0} racks are {rack=0} 2024-11-13T18:37:58,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,507 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,507 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,507 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:37:58,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1777528136=1, srv984797998=2, srv1173829649=0} racks are {rack=0} 2024-11-13T18:37:58,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,507 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,507 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,508 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:58,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1777528136=1, srv984797998=2, srv1173829649=0} racks are {rack=0} 2024-11-13T18:37:58,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,512 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,512 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,513 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:37:58,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv542103552=1, srv562751114=2, srv1163222730=0} racks are {rack=0} 2024-11-13T18:37:58,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,513 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,513 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,513 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:58,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv542103552=1, srv562751114=2, srv1163222730=0} racks are {rack=0} 2024-11-13T18:37:58,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,514 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,514 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,514 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:37:58,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv287800714=2, srv2137974405=1, srv2018403583=0} racks are {rack=0} 2024-11-13T18:37:58,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,514 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,514 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:37:58,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv287800714=2, srv2137974405=1, srv2018403583=0} racks are {rack=0} 2024-11-13T18:37:58,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,515 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,515 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:58,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv287800714=2, srv2137974405=1, srv2018403583=0} racks are {rack=0} 2024-11-13T18:37:58,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,515 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,515 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,516 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:37:58,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv477532266=1, srv1866522928=0, srv506760322=2} racks are {rack=0} 2024-11-13T18:37:58,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,516 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,516 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,516 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:37:58,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv477532266=1, srv1866522928=0, srv506760322=2} racks are {rack=0} 2024-11-13T18:37:58,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,517 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,517 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,517 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:37:58,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv477532266=1, srv1866522928=0, srv506760322=2} racks are {rack=0} 2024-11-13T18:37:58,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,517 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,517 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,517 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:58,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv477532266=1, srv1866522928=0, srv506760322=2} racks are {rack=0} 2024-11-13T18:37:58,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,518 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,518 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,518 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-13T18:37:58,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1016439473=1, srv808116739=2, srv1004457967=0} racks are {rack=0} 2024-11-13T18:37:58,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,519 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,519 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,519 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-13T18:37:58,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1016439473=1, srv808116739=2, srv1004457967=0} racks are {rack=0} 2024-11-13T18:37:58,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,519 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,519 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,519 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-13T18:37:58,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1016439473=1, srv808116739=2, srv1004457967=0} racks are {rack=0} 2024-11-13T18:37:58,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,519 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,520 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,520 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-13T18:37:58,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1016439473=1, srv808116739=2, srv1004457967=0} racks are {rack=0} 2024-11-13T18:37:58,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,524 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,524 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,524 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-13T18:37:58,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1016439473=1, srv808116739=2, srv1004457967=0} racks are {rack=0} 2024-11-13T18:37:58,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,525 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,525 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,525 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-13T18:37:58,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1016439473=1, srv808116739=2, srv1004457967=0} racks are {rack=0} 2024-11-13T18:37:58,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,525 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,525 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,525 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-13T18:37:58,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1016439473=1, srv808116739=2, srv1004457967=0} racks are {rack=0} 2024-11-13T18:37:58,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,526 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,526 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,526 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:37:58,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1016439473=1, srv808116739=2, srv1004457967=0} racks are {rack=0} 2024-11-13T18:37:58,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,526 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,526 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,526 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:37:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1016439473=1, srv808116739=2, srv1004457967=0} racks are {rack=0} 2024-11-13T18:37:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,527 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,527 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,527 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:37:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1016439473=1, srv808116739=2, srv1004457967=0} racks are {rack=0} 2024-11-13T18:37:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,527 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,527 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,527 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:37:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1016439473=1, srv808116739=2, srv1004457967=0} racks are {rack=0} 2024-11-13T18:37:58,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,528 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,528 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,528 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:37:58,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1016439473=1, srv808116739=2, srv1004457967=0} racks are {rack=0} 2024-11-13T18:37:58,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,529 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,529 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,529 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:58,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1016439473=1, srv808116739=2, srv1004457967=0} racks are {rack=0} 2024-11-13T18:37:58,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,529 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,529 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,529 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-13T18:37:58,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1016439473=1, srv808116739=2, srv1004457967=0} racks are {rack=0} 2024-11-13T18:37:58,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,530 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,530 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,530 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-13T18:37:58,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1016439473=1, srv808116739=2, srv1004457967=0} racks are {rack=0} 2024-11-13T18:37:58,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,530 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,530 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,530 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-13T18:37:58,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1016439473=1, srv808116739=2, srv1004457967=0} racks are {rack=0} 2024-11-13T18:37:58,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,531 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,531 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,531 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-13T18:37:58,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1016439473=1, srv808116739=2, srv1004457967=0} racks are {rack=0} 2024-11-13T18:37:58,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,531 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,531 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,531 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-13T18:37:58,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1016439473=1, srv808116739=2, srv1004457967=0} racks are {rack=0} 2024-11-13T18:37:58,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,532 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,532 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,532 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-13T18:37:58,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1016439473=1, srv808116739=2, srv1004457967=0} racks are {rack=0} 2024-11-13T18:37:58,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,532 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,532 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,532 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-13T18:37:58,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1016439473=1, srv808116739=2, srv1004457967=0} racks are {rack=0} 2024-11-13T18:37:58,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-13T18:37:58,533 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,533 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,533 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:37:58,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225928056=1, srv617432680=3, srv1231383358=0, srv264905922=2} racks are {rack=0} 2024-11-13T18:37:58,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,533 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,533 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,534 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:37:58,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225928056=1, srv617432680=3, srv1231383358=0, srv264905922=2} racks are {rack=0} 2024-11-13T18:37:58,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,534 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,534 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,534 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:58,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225928056=1, srv617432680=3, srv1231383358=0, srv264905922=2} racks are {rack=0} 2024-11-13T18:37:58,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,535 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,535 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,535 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:37:58,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1974706020=1, srv2065678216=2, srv913379277=3, srv1064709669=0} racks are {rack=0} 2024-11-13T18:37:58,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,535 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,535 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,536 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:37:58,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1974706020=1, srv2065678216=2, srv913379277=3, srv1064709669=0} racks are {rack=0} 2024-11-13T18:37:58,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,536 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,536 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,536 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:37:58,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1974706020=1, srv2065678216=2, srv913379277=3, srv1064709669=0} racks are {rack=0} 2024-11-13T18:37:58,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,537 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,537 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,537 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:58,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1974706020=1, srv2065678216=2, srv913379277=3, srv1064709669=0} racks are {rack=0} 2024-11-13T18:37:58,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,537 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,537 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,538 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:37:58,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1679938982=2, srv1346895138=1, srv1191778715=0, srv419540854=3} racks are {rack=0} 2024-11-13T18:37:58,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,538 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,539 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,539 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:37:58,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1679938982=2, srv1346895138=1, srv1191778715=0, srv419540854=3} racks are {rack=0} 2024-11-13T18:37:58,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,539 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,539 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,539 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:37:58,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1679938982=2, srv1346895138=1, srv1191778715=0, srv419540854=3} racks are {rack=0} 2024-11-13T18:37:58,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,540 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,540 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,540 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:37:58,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1679938982=2, srv1346895138=1, srv1191778715=0, srv419540854=3} racks are {rack=0} 2024-11-13T18:37:58,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,540 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,540 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,540 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:58,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1679938982=2, srv1346895138=1, srv1191778715=0, srv419540854=3} racks are {rack=0} 2024-11-13T18:37:58,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,541 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,541 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,542 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:37:58,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv139889709=1, srv1399404331=2, srv546657826=3, srv1148612888=0} racks are {rack=0} 2024-11-13T18:37:58,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,542 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,542 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,542 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:37:58,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv139889709=1, srv1399404331=2, srv546657826=3, srv1148612888=0} racks are {rack=0} 2024-11-13T18:37:58,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,543 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,543 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,543 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:37:58,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv139889709=1, srv1399404331=2, srv546657826=3, srv1148612888=0} racks are {rack=0} 2024-11-13T18:37:58,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,543 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,543 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,543 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:37:58,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv139889709=1, srv1399404331=2, srv546657826=3, srv1148612888=0} racks are {rack=0} 2024-11-13T18:37:58,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,544 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,544 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,544 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:37:58,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv139889709=1, srv1399404331=2, srv546657826=3, srv1148612888=0} racks are {rack=0} 2024-11-13T18:37:58,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,544 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,545 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,545 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:58,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv139889709=1, srv1399404331=2, srv546657826=3, srv1148612888=0} racks are {rack=0} 2024-11-13T18:37:58,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,545 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,545 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,546 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:37:58,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2132948465=2, srv1589753011=1, srv158705677=0, srv741749937=3} racks are {rack=0} 2024-11-13T18:37:58,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,546 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,546 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,546 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:37:58,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2132948465=2, srv1589753011=1, srv158705677=0, srv741749937=3} racks are {rack=0} 2024-11-13T18:37:58,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,547 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,547 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,547 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:37:58,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2132948465=2, srv1589753011=1, srv158705677=0, srv741749937=3} racks are {rack=0} 2024-11-13T18:37:58,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,547 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,547 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,547 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:37:58,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2132948465=2, srv1589753011=1, srv158705677=0, srv741749937=3} racks are {rack=0} 2024-11-13T18:37:58,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,548 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,548 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,548 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:37:58,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2132948465=2, srv1589753011=1, srv158705677=0, srv741749937=3} racks are {rack=0} 2024-11-13T18:37:58,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,548 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,548 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,548 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:58,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2132948465=2, srv1589753011=1, srv158705677=0, srv741749937=3} racks are {rack=0} 2024-11-13T18:37:58,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,549 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,549 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,549 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:37:58,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1770432145=0, srv1949326073=1, srv394556022=3, srv1954634747=2} racks are {rack=0} 2024-11-13T18:37:58,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,550 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,550 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,550 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:37:58,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1770432145=0, srv1949326073=1, srv394556022=3, srv1954634747=2} racks are {rack=0} 2024-11-13T18:37:58,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,550 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,550 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,550 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:37:58,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1770432145=0, srv1949326073=1, srv394556022=3, srv1954634747=2} racks are {rack=0} 2024-11-13T18:37:58,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,551 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,551 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,551 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:37:58,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1770432145=0, srv1949326073=1, srv394556022=3, srv1954634747=2} racks are {rack=0} 2024-11-13T18:37:58,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,551 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,551 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,551 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:37:58,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1770432145=0, srv1949326073=1, srv394556022=3, srv1954634747=2} racks are {rack=0} 2024-11-13T18:37:58,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,552 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,552 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,552 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:58,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1770432145=0, srv1949326073=1, srv394556022=3, srv1954634747=2} racks are {rack=0} 2024-11-13T18:37:58,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,553 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,553 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,553 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:37:58,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1910261347=2, srv788429081=3, srv1186935372=0, srv1667482057=1} racks are {rack=0} 2024-11-13T18:37:58,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,555 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,555 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,555 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:37:58,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1910261347=2, srv788429081=3, srv1186935372=0, srv1667482057=1} racks are {rack=0} 2024-11-13T18:37:58,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,556 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,556 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,556 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:37:58,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1910261347=2, srv788429081=3, srv1186935372=0, srv1667482057=1} racks are {rack=0} 2024-11-13T18:37:58,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,557 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,557 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,557 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:37:58,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1910261347=2, srv788429081=3, srv1186935372=0, srv1667482057=1} racks are {rack=0} 2024-11-13T18:37:58,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,557 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,557 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,557 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:37:58,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1910261347=2, srv788429081=3, srv1186935372=0, srv1667482057=1} racks are {rack=0} 2024-11-13T18:37:58,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,558 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,558 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,558 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:58,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1910261347=2, srv788429081=3, srv1186935372=0, srv1667482057=1} racks are {rack=0} 2024-11-13T18:37:58,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,558 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,558 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,558 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:37:58,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1834333567=2, srv565348222=3, srv1149502645=0, srv1411371084=1} racks are {rack=0} 2024-11-13T18:37:58,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,559 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,559 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,559 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:37:58,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1834333567=2, srv565348222=3, srv1149502645=0, srv1411371084=1} racks are {rack=0} 2024-11-13T18:37:58,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,559 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,559 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,559 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:37:58,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1834333567=2, srv565348222=3, srv1149502645=0, srv1411371084=1} racks are {rack=0} 2024-11-13T18:37:58,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,560 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,560 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,560 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:37:58,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1834333567=2, srv565348222=3, srv1149502645=0, srv1411371084=1} racks are {rack=0} 2024-11-13T18:37:58,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,561 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,561 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,561 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:37:58,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1834333567=2, srv565348222=3, srv1149502645=0, srv1411371084=1} racks are {rack=0} 2024-11-13T18:37:58,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,562 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,562 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,562 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:58,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1834333567=2, srv565348222=3, srv1149502645=0, srv1411371084=1} racks are {rack=0} 2024-11-13T18:37:58,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,562 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,562 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,562 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-13T18:37:58,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1834333567=2, srv565348222=3, srv1149502645=0, srv1411371084=1} racks are {rack=0} 2024-11-13T18:37:58,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,563 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,563 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,563 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:37:58,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1341250415=1, srv749446543=3, srv1012760992=0, srv65248843=2} racks are {rack=0} 2024-11-13T18:37:58,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,564 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,564 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,564 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:37:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1341250415=1, srv749446543=3, srv1012760992=0, srv65248843=2} racks are {rack=0} 2024-11-13T18:37:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,564 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,564 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,564 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:37:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1341250415=1, srv749446543=3, srv1012760992=0, srv65248843=2} racks are {rack=0} 2024-11-13T18:37:58,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,565 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,565 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,565 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:37:58,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1341250415=1, srv749446543=3, srv1012760992=0, srv65248843=2} racks are {rack=0} 2024-11-13T18:37:58,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,566 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,566 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,566 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:37:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1341250415=1, srv749446543=3, srv1012760992=0, srv65248843=2} racks are {rack=0} 2024-11-13T18:37:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,566 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,566 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,567 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1341250415=1, srv749446543=3, srv1012760992=0, srv65248843=2} racks are {rack=0} 2024-11-13T18:37:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,567 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,567 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,567 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-13T18:37:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1341250415=1, srv749446543=3, srv1012760992=0, srv65248843=2} racks are {rack=0} 2024-11-13T18:37:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,568 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,568 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,568 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-13T18:37:58,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1341250415=1, srv749446543=3, srv1012760992=0, srv65248843=2} racks are {rack=0} 2024-11-13T18:37:58,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,568 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,568 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,569 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:37:58,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1640184511=0, srv1910189929=2, srv1728459835=1, srv395597470=3} racks are {rack=0} 2024-11-13T18:37:58,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,569 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,569 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,569 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:37:58,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1640184511=0, srv1910189929=2, srv1728459835=1, srv395597470=3} racks are {rack=0} 2024-11-13T18:37:58,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,570 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,570 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,570 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:37:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1640184511=0, srv1910189929=2, srv1728459835=1, srv395597470=3} racks are {rack=0} 2024-11-13T18:37:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,570 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,570 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,570 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:37:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1640184511=0, srv1910189929=2, srv1728459835=1, srv395597470=3} racks are {rack=0} 2024-11-13T18:37:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,571 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,571 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,571 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:37:58,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1640184511=0, srv1910189929=2, srv1728459835=1, srv395597470=3} racks are {rack=0} 2024-11-13T18:37:58,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,571 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,571 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,571 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:58,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1640184511=0, srv1910189929=2, srv1728459835=1, srv395597470=3} racks are {rack=0} 2024-11-13T18:37:58,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,572 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,572 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,572 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-13T18:37:58,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1640184511=0, srv1910189929=2, srv1728459835=1, srv395597470=3} racks are {rack=0} 2024-11-13T18:37:58,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-13T18:37:58,572 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,572 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,574 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:37:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv871339109=4, srv200064842=2, srv792578575=3, srv1931177811=1, srv1147910884=0} racks are {rack=0} 2024-11-13T18:37:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-13T18:37:58,574 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,574 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,574 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:37:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv871339109=4, srv200064842=2, srv792578575=3, srv1931177811=1, srv1147910884=0} racks are {rack=0} 2024-11-13T18:37:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-13T18:37:58,575 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,575 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,575 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:37:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv871339109=4, srv200064842=2, srv792578575=3, srv1931177811=1, srv1147910884=0} racks are {rack=0} 2024-11-13T18:37:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-13T18:37:58,575 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,576 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,576 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:58,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv871339109=4, srv200064842=2, srv792578575=3, srv1931177811=1, srv1147910884=0} racks are {rack=0} 2024-11-13T18:37:58,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-13T18:37:58,576 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,576 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,619 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1380 2024-11-13T18:37:58,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,622 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,622 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,622 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1381 2024-11-13T18:37:58,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,623 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,623 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,623 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table880 2024-11-13T18:37:58,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,624 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,624 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table880) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,624 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1140 2024-11-13T18:37:58,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,625 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,625 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,625 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1382 2024-11-13T18:37:58,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,625 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,625 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,625 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table881 2024-11-13T18:37:58,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,626 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,626 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table881) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,626 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1141 2024-11-13T18:37:58,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,626 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,626 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,626 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1383 2024-11-13T18:37:58,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,627 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,627 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,627 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table640 2024-11-13T18:37:58,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,628 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,628 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table640) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,628 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table882 2024-11-13T18:37:58,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table882) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,629 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1142 2024-11-13T18:37:58,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,629 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1384 2024-11-13T18:37:58,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,630 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table641 2024-11-13T18:37:58,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table641) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,630 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table883 2024-11-13T18:37:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,631 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,631 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table883) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,631 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1143 2024-11-13T18:37:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,632 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,632 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,632 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1385 2024-11-13T18:37:58,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,632 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,632 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,632 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table400 2024-11-13T18:37:58,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,633 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table642 2024-11-13T18:37:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,634 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,634 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table642) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,634 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table884 2024-11-13T18:37:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,634 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,634 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table884) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,634 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1144 2024-11-13T18:37:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,635 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,635 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,635 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1386 2024-11-13T18:37:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,636 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,636 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,636 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table401 2024-11-13T18:37:58,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,636 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,636 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,636 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table643 2024-11-13T18:37:58,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,637 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,637 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table643) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,637 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table885 2024-11-13T18:37:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,637 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,638 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table885) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,638 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1145 2024-11-13T18:37:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,638 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,638 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,638 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1387 2024-11-13T18:37:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,639 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,639 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,639 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table402 2024-11-13T18:37:58,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,639 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,639 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,639 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table644 2024-11-13T18:37:58,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,640 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,640 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table644) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,640 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table886 2024-11-13T18:37:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,641 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,641 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table886) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,641 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table403 2024-11-13T18:37:58,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,641 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table645 2024-11-13T18:37:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,642 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table645) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table887 2024-11-13T18:37:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,643 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table887) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,643 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table404 2024-11-13T18:37:58,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,644 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table646 2024-11-13T18:37:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,644 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,644 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table646) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table888 2024-11-13T18:37:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table888) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,645 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table405 2024-11-13T18:37:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,646 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,646 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,646 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table647 2024-11-13T18:37:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,646 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,646 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table647) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,646 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table889 2024-11-13T18:37:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,647 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,647 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table889) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,647 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table406 2024-11-13T18:37:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,647 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,647 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,647 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table648 2024-11-13T18:37:58,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table648) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,650 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table407 2024-11-13T18:37:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,650 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table649 2024-11-13T18:37:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,651 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table649) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table408 2024-11-13T18:37:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,652 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,652 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,652 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table409 2024-11-13T18:37:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,652 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,652 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,652 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1146 2024-11-13T18:37:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,653 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,653 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,653 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1388 2024-11-13T18:37:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,653 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,653 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,653 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1147 2024-11-13T18:37:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,654 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,654 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,654 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1389 2024-11-13T18:37:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,655 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1148 2024-11-13T18:37:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,655 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1149 2024-11-13T18:37:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,656 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,656 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,656 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1370 2024-11-13T18:37:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,657 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,657 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,657 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1371 2024-11-13T18:37:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,657 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,657 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,657 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table870 2024-11-13T18:37:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,658 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,658 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table870) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,658 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1130 2024-11-13T18:37:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,658 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,658 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,658 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1372 2024-11-13T18:37:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,659 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,659 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,659 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table871 2024-11-13T18:37:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,660 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,660 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table871) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,660 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1131 2024-11-13T18:37:58,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1373 2024-11-13T18:37:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table630 2024-11-13T18:37:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,662 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table630) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,662 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table872 2024-11-13T18:37:58,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table872) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,663 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1132 2024-11-13T18:37:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,663 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,663 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1374 2024-11-13T18:37:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,664 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,664 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,664 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table631 2024-11-13T18:37:58,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table631) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,665 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table873 2024-11-13T18:37:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,666 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,666 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table873) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,666 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1133 2024-11-13T18:37:58,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,666 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,667 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,667 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1375 2024-11-13T18:37:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,667 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,667 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,667 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table632 2024-11-13T18:37:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,668 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,668 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table632) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,668 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table874 2024-11-13T18:37:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,668 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,668 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table874) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,668 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1134 2024-11-13T18:37:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,669 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,669 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,669 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1376 2024-11-13T18:37:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,669 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,669 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,669 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table633 2024-11-13T18:37:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,670 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,670 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table633) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,670 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table875 2024-11-13T18:37:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,671 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,671 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table875) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table634 2024-11-13T18:37:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,671 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,671 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table634) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table876 2024-11-13T18:37:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,672 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,672 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table876) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,672 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table635 2024-11-13T18:37:58,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,672 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,672 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table635) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,673 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table877 2024-11-13T18:37:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,673 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,673 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table877) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,673 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table636 2024-11-13T18:37:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,674 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,674 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table636) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,674 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table878 2024-11-13T18:37:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,674 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,674 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table878) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,674 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table637 2024-11-13T18:37:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,675 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,675 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table637) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table879 2024-11-13T18:37:58,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,675 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,676 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table879) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,676 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table638 2024-11-13T18:37:58,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,676 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,676 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table638) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,676 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table639 2024-11-13T18:37:58,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table639) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1135 2024-11-13T18:37:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1377 2024-11-13T18:37:58,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,678 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,678 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1136 2024-11-13T18:37:58,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,679 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,679 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,679 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1378 2024-11-13T18:37:58,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,679 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,679 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,679 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1137 2024-11-13T18:37:58,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,680 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,680 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,680 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1379 2024-11-13T18:37:58,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,681 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,681 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,681 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1138 2024-11-13T18:37:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,681 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,681 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,681 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1139 2024-11-13T18:37:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,682 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,682 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,682 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table660 2024-11-13T18:37:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,682 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,682 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table660) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,682 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1360 2024-11-13T18:37:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,683 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,683 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,683 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table661 2024-11-13T18:37:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,684 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,684 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table661) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,684 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1361 2024-11-13T18:37:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,684 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,684 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,684 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table420 2024-11-13T18:37:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,685 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,685 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,685 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table662 2024-11-13T18:37:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,686 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table662) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1120 2024-11-13T18:37:58,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,686 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1362 2024-11-13T18:37:58,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,687 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,687 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table421 2024-11-13T18:37:58,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,688 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,688 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,688 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table663 2024-11-13T18:37:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,688 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,688 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table663) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,688 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1121 2024-11-13T18:37:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,689 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1363 2024-11-13T18:37:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table422 2024-11-13T18:37:58,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table664 2024-11-13T18:37:58,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,691 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table664) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1122 2024-11-13T18:37:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,691 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,692 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1364 2024-11-13T18:37:58,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,692 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,692 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table423 2024-11-13T18:37:58,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,693 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,693 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table665 2024-11-13T18:37:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,694 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,694 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table665) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,694 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1123 2024-11-13T18:37:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,694 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,694 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,694 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1365 2024-11-13T18:37:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,695 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table424 2024-11-13T18:37:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,696 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,696 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,696 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table666 2024-11-13T18:37:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,696 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,696 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table666) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,696 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table425 2024-11-13T18:37:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table667 2024-11-13T18:37:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table667) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,698 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table426 2024-11-13T18:37:58,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,698 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table668 2024-11-13T18:37:58,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,699 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table668) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table427 2024-11-13T18:37:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,700 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,700 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,700 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table669 2024-11-13T18:37:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,700 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,700 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table669) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,700 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table428 2024-11-13T18:37:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,701 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,701 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,701 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table429 2024-11-13T18:37:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,702 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,702 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,702 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1124 2024-11-13T18:37:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,702 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,702 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,702 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1366 2024-11-13T18:37:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,703 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,703 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,703 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1125 2024-11-13T18:37:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,703 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,703 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,703 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1367 2024-11-13T18:37:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,704 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,704 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,704 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1126 2024-11-13T18:37:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,705 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,705 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,705 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1368 2024-11-13T18:37:58,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,705 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,706 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,706 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1127 2024-11-13T18:37:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,706 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,706 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,706 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1369 2024-11-13T18:37:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,707 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1128 2024-11-13T18:37:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,707 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1129 2024-11-13T18:37:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,708 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,708 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,708 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table890 2024-11-13T18:37:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,709 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,709 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table890) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,709 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table891 2024-11-13T18:37:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,709 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,709 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table891) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,709 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table650 2024-11-13T18:37:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,710 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table650) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,710 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table892 2024-11-13T18:37:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,710 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table892) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,710 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1350 2024-11-13T18:37:58,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,711 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,711 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table651 2024-11-13T18:37:58,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table651) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table893 2024-11-13T18:37:58,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table893) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1351 2024-11-13T18:37:58,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,713 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,713 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,713 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table410 2024-11-13T18:37:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,714 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table652 2024-11-13T18:37:58,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table652) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,714 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table894 2024-11-13T18:37:58,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,715 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,715 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table894) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1110 2024-11-13T18:37:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,716 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,716 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,716 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1352 2024-11-13T18:37:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,716 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table411 2024-11-13T18:37:58,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table653 2024-11-13T18:37:58,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table653) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table895 2024-11-13T18:37:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table895) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1111 2024-11-13T18:37:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,719 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1353 2024-11-13T18:37:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,720 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,720 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,720 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table412 2024-11-13T18:37:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,720 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,720 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,720 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table654 2024-11-13T18:37:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,721 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,721 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table654) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,721 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table896 2024-11-13T18:37:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,721 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,722 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table896) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,722 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1112 2024-11-13T18:37:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,722 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,722 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1354 2024-11-13T18:37:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,723 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table413 2024-11-13T18:37:58,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,723 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table655 2024-11-13T18:37:58,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,724 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table655) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,724 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table897 2024-11-13T18:37:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,725 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table897) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table414 2024-11-13T18:37:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table656 2024-11-13T18:37:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table656) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table898 2024-11-13T18:37:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table898) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table415 2024-11-13T18:37:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,728 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table657 2024-11-13T18:37:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,728 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,728 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table657) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,728 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table899 2024-11-13T18:37:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,729 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table899) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,729 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table416 2024-11-13T18:37:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,729 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,729 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table658 2024-11-13T18:37:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,730 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,730 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table658) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,730 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table417 2024-11-13T18:37:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,731 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,731 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,731 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table659 2024-11-13T18:37:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,732 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,732 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table659) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,732 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table418 2024-11-13T18:37:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,732 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,732 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,732 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table419 2024-11-13T18:37:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,733 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,733 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,733 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1113 2024-11-13T18:37:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,734 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,734 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,734 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1355 2024-11-13T18:37:58,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,734 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,734 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,734 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1114 2024-11-13T18:37:58,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,735 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,735 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1356 2024-11-13T18:37:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,736 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1115 2024-11-13T18:37:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,749 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,749 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,749 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1357 2024-11-13T18:37:58,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,749 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,749 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1116 2024-11-13T18:37:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1358 2024-11-13T18:37:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1117 2024-11-13T18:37:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1359 2024-11-13T18:37:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,754 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1118 2024-11-13T18:37:58,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,755 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,755 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,755 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1119 2024-11-13T18:37:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,755 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,755 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,755 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1182 2024-11-13T18:37:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,756 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,756 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,756 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1183 2024-11-13T18:37:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,756 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,756 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,756 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1184 2024-11-13T18:37:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,757 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,757 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,757 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1185 2024-11-13T18:37:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,757 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,757 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,757 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1186 2024-11-13T18:37:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,758 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,758 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,758 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1187 2024-11-13T18:37:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,758 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,758 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,758 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table840 2024-11-13T18:37:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,759 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,759 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table840) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,759 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1188 2024-11-13T18:37:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,760 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,760 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,760 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table841 2024-11-13T18:37:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,760 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,760 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table841) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,760 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1189 2024-11-13T18:37:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table600 2024-11-13T18:37:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table600) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table842 2024-11-13T18:37:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table842) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table601 2024-11-13T18:37:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table601) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table843 2024-11-13T18:37:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table843) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table602 2024-11-13T18:37:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table602) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table844 2024-11-13T18:37:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,764 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,764 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table844) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table603 2024-11-13T18:37:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table603) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,765 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table845 2024-11-13T18:37:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table845) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,765 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table604 2024-11-13T18:37:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,766 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table604) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,766 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table846 2024-11-13T18:37:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,766 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table846) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,766 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table605 2024-11-13T18:37:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table605) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table847 2024-11-13T18:37:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table847) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table606 2024-11-13T18:37:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,768 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,768 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table606) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,768 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table848 2024-11-13T18:37:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,768 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,768 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table848) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,768 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1180 2024-11-13T18:37:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,769 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,769 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,769 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table607 2024-11-13T18:37:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,769 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,769 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table607) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,769 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table849 2024-11-13T18:37:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,770 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,770 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table849) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,770 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1181 2024-11-13T18:37:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,770 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,770 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,770 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table608 2024-11-13T18:37:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,770 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table608) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table609 2024-11-13T18:37:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,771 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table609) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1171 2024-11-13T18:37:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,771 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1172 2024-11-13T18:37:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,772 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,772 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,772 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1173 2024-11-13T18:37:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,773 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,773 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1174 2024-11-13T18:37:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,773 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,773 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1175 2024-11-13T18:37:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1176 2024-11-13T18:37:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1177 2024-11-13T18:37:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table830 2024-11-13T18:37:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table830) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1178 2024-11-13T18:37:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,776 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table831 2024-11-13T18:37:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table831) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,776 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table832 2024-11-13T18:37:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,777 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table832) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table833 2024-11-13T18:37:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,777 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table833) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table834 2024-11-13T18:37:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table834) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,778 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table835 2024-11-13T18:37:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table835) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,778 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table836 2024-11-13T18:37:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,779 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,779 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table836) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table837 2024-11-13T18:37:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,779 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,779 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table837) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table838 2024-11-13T18:37:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table838) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1170 2024-11-13T18:37:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table839 2024-11-13T18:37:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table839) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1179 2024-11-13T18:37:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1160 2024-11-13T18:37:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,782 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1161 2024-11-13T18:37:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,782 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1162 2024-11-13T18:37:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,783 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1163 2024-11-13T18:37:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,783 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table860 2024-11-13T18:37:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,784 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table860) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,784 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1164 2024-11-13T18:37:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,784 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,784 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table861 2024-11-13T18:37:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,785 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,785 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table861) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,785 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1165 2024-11-13T18:37:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,785 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,785 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,785 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table620 2024-11-13T18:37:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,786 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,786 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table620) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,786 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table862 2024-11-13T18:37:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,786 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,786 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table862) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,786 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1166 2024-11-13T18:37:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,787 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,787 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,787 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table621 2024-11-13T18:37:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,787 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,787 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table621) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table863 2024-11-13T18:37:58,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,789 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,789 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table863) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,789 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1167 2024-11-13T18:37:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,789 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,790 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,790 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table622 2024-11-13T18:37:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,790 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,790 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table622) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,790 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table864 2024-11-13T18:37:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,791 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,791 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table864) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,791 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table623 2024-11-13T18:37:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,791 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,791 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table623) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,791 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table865 2024-11-13T18:37:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table865) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table624 2024-11-13T18:37:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table624) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table866 2024-11-13T18:37:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table866) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table625 2024-11-13T18:37:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,794 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table625) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,794 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table867 2024-11-13T18:37:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,794 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table867) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,794 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table626 2024-11-13T18:37:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,794 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table626) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table868 2024-11-13T18:37:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,795 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table868) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table627 2024-11-13T18:37:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,795 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table627) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table869 2024-11-13T18:37:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,797 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,797 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table869) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,797 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table628 2024-11-13T18:37:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,797 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,797 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table628) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,797 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table629 2024-11-13T18:37:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,798 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,798 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table629) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,798 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1168 2024-11-13T18:37:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,798 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,798 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,798 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1169 2024-11-13T18:37:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1391 2024-11-13T18:37:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1150 2024-11-13T18:37:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,800 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1392 2024-11-13T18:37:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,800 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,800 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,800 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1151 2024-11-13T18:37:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,801 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,801 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,801 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1393 2024-11-13T18:37:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,801 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,801 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,801 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1152 2024-11-13T18:37:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1394 2024-11-13T18:37:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1153 2024-11-13T18:37:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,803 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,803 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,803 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1395 2024-11-13T18:37:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,803 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,803 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,803 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table850 2024-11-13T18:37:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table850) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1154 2024-11-13T18:37:58,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,805 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,805 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,805 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1396 2024-11-13T18:37:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,805 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,805 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,805 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table851 2024-11-13T18:37:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table851) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1155 2024-11-13T18:37:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1397 2024-11-13T18:37:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table610 2024-11-13T18:37:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table610) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table852 2024-11-13T18:37:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table852) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,808 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1156 2024-11-13T18:37:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,808 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1398 2024-11-13T18:37:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,809 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table611 2024-11-13T18:37:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,809 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table611) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table853 2024-11-13T18:37:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table853) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table612 2024-11-13T18:37:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table612) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table854 2024-11-13T18:37:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table854) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table613 2024-11-13T18:37:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table613) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table855 2024-11-13T18:37:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,812 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,812 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table855) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table614 2024-11-13T18:37:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,812 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,812 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table614) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table856 2024-11-13T18:37:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,813 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table856) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,813 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table615 2024-11-13T18:37:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,813 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table615) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,813 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table857 2024-11-13T18:37:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table857) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table616 2024-11-13T18:37:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,814 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table616) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table858 2024-11-13T18:37:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,814 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table858) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table617 2024-11-13T18:37:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table617) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table859 2024-11-13T18:37:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table859) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,816 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table618 2024-11-13T18:37:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,816 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,816 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table618) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,816 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1390 2024-11-13T18:37:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,817 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,817 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,817 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table619 2024-11-13T18:37:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,817 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,817 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table619) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,817 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1157 2024-11-13T18:37:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1399 2024-11-13T18:37:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1158 2024-11-13T18:37:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1159 2024-11-13T18:37:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,819 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,819 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
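
Each plan attempt also logs how BalancerClusterState indexes the cluster: every server is mapped to a host index and a rack index, and because this test runs one server per host with no rack topology configured, all six hosts collapse onto the single rack index 0. The sketch below is a hypothetical illustration of that kind of indexing, not the real BalancerClusterState code.

import java.util.LinkedHashMap;
import java.util.Map;

// Hypothetical illustration of the server -> host -> rack indexing logged as
// "server N is on host N" / "server N is on rack 0" above.
public class ClusterIndexSketch {

    public static void main(String[] args) {
        String[] servers = {
            "srv1217930446", "srv1272628633", "srv1331179858",
            "srv1366310487", "srv380057362", "srv726429114"
        };

        Map<String, Integer> hostIndex = new LinkedHashMap<>(); // host name -> index
        Map<String, Integer> rackIndex = new LinkedHashMap<>(); // rack name -> index

        for (int serverIdx = 0; serverIdx < servers.length; serverIdx++) {
            String host = servers[serverIdx]; // one host per server in this test
            String rack = "rack";             // single default rack
            int h = hostIndex.computeIfAbsent(host, k -> hostIndex.size());
            int r = rackIndex.computeIfAbsent(rack, k -> rackIndex.size());
            System.out.println("server " + serverIdx + " is on host " + h);
            System.out.println("server " + serverIdx + " is on rack " + r);
        }
        System.out.println("number of hosts=" + hostIndex.size()
            + ", number of racks=" + rackIndex.size());
    }
}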
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,819 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table240 2024-11-13T18:37:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,819 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,819 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,819 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table482 2024-11-13T18:37:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table482) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table241 2024-11-13T18:37:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table483 2024-11-13T18:37:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table483) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,821 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table242 2024-11-13T18:37:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table484 2024-11-13T18:37:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table484) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table243 2024-11-13T18:37:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
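
The recurring "Slop is less than zero, not checking for sloppiness" line refers to the slop-based shortcut: with a non-negative slop, a table counts as needing balancing when some server's region count falls outside average*(1 +/- slop); with a negative slop, as in this test, that shortcut is bypassed and only the cost-based gate above applies. Below is a hypothetical sketch of such a slop check, mirroring the idea rather than HBase's exact implementation.

// Hypothetical slop ("sloppiness") check. A negative slop disables the check;
// otherwise a server whose region count falls outside average*(1 +/- slop)
// marks the table as needing balancing.
public class SlopCheckSketch {

    static boolean isSloppy(int[] regionsPerServer, float slop) {
        if (slop < 0) {
            return false; // "Slop is less than zero, not checking for sloppiness."
        }
        double total = 0;
        for (int r : regionsPerServer) {
            total += r;
        }
        double average = total / regionsPerServer.length;
        double floor = Math.floor(average * (1 - slop));
        double ceiling = Math.ceil(average * (1 + slop));
        for (int r : regionsPerServer) {
            if (r < floor || r > ceiling) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        int[] regions = {10, 10, 10, 10, 10, 40};
        System.out.println("sloppy with slop=0.2: " + isSloppy(regions, 0.2f));
        System.out.println("sloppy with slop=-1:  " + isSloppy(regions, -1f));
    }
}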
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,823 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table485 2024-11-13T18:37:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table485) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,823 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table244 2024-11-13T18:37:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,824 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,824 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,824 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table486 2024-11-13T18:37:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,825 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,825 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table486) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,825 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table245 2024-11-13T18:37:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,825 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,825 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,825 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table487 2024-11-13T18:37:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table487) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,826 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table246 2024-11-13T18:37:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,826 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table488 2024-11-13T18:37:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,827 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table488) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table247 2024-11-13T18:37:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,827 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table489 2024-11-13T18:37:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table489) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table248 2024-11-13T18:37:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table249 2024-11-13T18:37:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1308 2024-11-13T18:37:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1309 2024-11-13T18:37:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1300 2024-11-13T18:37:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1301 2024-11-13T18:37:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,832 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,832 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1302 2024-11-13T18:37:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,832 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,832 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1303 2024-11-13T18:37:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1304 2024-11-13T18:37:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table490 2024-11-13T18:37:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table490) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1305 2024-11-13T18:37:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,837 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,837 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,837 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table491 2024-11-13T18:37:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,837 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,837 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table491) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,837 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1306 2024-11-13T18:37:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,838 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,838 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,838 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table250 2024-11-13T18:37:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,838 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,838 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,838 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table492 2024-11-13T18:37:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table492) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,839 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1307 2024-11-13T18:37:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,839 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table471 2024-11-13T18:37:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table471) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table230 2024-11-13T18:37:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table472 2024-11-13T18:37:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table472) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table231 2024-11-13T18:37:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table473 2024-11-13T18:37:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,842 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table473) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,842 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table232 2024-11-13T18:37:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,842 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,842 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table474 2024-11-13T18:37:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table474) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table233 2024-11-13T18:37:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table475 2024-11-13T18:37:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table475) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table234 2024-11-13T18:37:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table476 2024-11-13T18:37:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table476) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table235 2024-11-13T18:37:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table477 2024-11-13T18:37:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table477) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table236 2024-11-13T18:37:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,848 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,848 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,848 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table478 2024-11-13T18:37:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,848 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,848 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table478) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,848 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table237 2024-11-13T18:37:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,849 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,849 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table479 2024-11-13T18:37:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,849 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,850 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table479) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,850 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table238 2024-11-13T18:37:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table239 2024-11-13T18:37:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table480 2024-11-13T18:37:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table480) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table481 2024-11-13T18:37:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table481) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table262 2024-11-13T18:37:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,853 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,853 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table263 2024-11-13T18:37:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,854 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table264 2024-11-13T18:37:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,854 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table265 2024-11-13T18:37:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,855 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table266 2024-11-13T18:37:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,855 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table267 2024-11-13T18:37:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table268 2024-11-13T18:37:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table269 2024-11-13T18:37:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table270 2024-11-13T18:37:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table271 2024-11-13T18:37:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table272 2024-11-13T18:37:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table251 2024-11-13T18:37:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table493 2024-11-13T18:37:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table493) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table252 2024-11-13T18:37:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,860 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,860 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,860 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table494 2024-11-13T18:37:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table494) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table253 2024-11-13T18:37:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table495 2024-11-13T18:37:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table495) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table254 2024-11-13T18:37:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table496 2024-11-13T18:37:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table496) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table255 2024-11-13T18:37:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,864 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,864 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table497 2024-11-13T18:37:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table497) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table256 2024-11-13T18:37:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,865 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table498 2024-11-13T18:37:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table498) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table257 2024-11-13T18:37:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table499 2024-11-13T18:37:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table499) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table258 2024-11-13T18:37:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table259 2024-11-13T18:37:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table260 2024-11-13T18:37:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table261 2024-11-13T18:37:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table680 2024-11-13T18:37:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table680) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table681 2024-11-13T18:37:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table681) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table440 2024-11-13T18:37:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table440) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table682 2024-11-13T18:37:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table682) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table441 2024-11-13T18:37:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,872 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table441) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table683 2024-11-13T18:37:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,872 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table683) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table200 2024-11-13T18:37:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table442 2024-11-13T18:37:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table442) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table684 2024-11-13T18:37:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table684) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1340 2024-11-13T18:37:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table201 2024-11-13T18:37:58,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table443 2024-11-13T18:37:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table443) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table685 2024-11-13T18:37:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table685) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1341 2024-11-13T18:37:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table202 2024-11-13T18:37:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table444 2024-11-13T18:37:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table444) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table686 2024-11-13T18:37:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table686) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1100 2024-11-13T18:37:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1342 2024-11-13T18:37:58,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table203 2024-11-13T18:37:58,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table445 2024-11-13T18:37:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table445) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table687 2024-11-13T18:37:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table687) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1101 2024-11-13T18:37:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1343 2024-11-13T18:37:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table204 2024-11-13T18:37:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table446 2024-11-13T18:37:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table446) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table688 2024-11-13T18:37:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,890 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,890 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table688) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,890 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table205 2024-11-13T18:37:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,890 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,890 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,890 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table447 2024-11-13T18:37:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table447) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table689 2024-11-13T18:37:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table689) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table206 2024-11-13T18:37:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,892 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,892 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,892 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table448 2024-11-13T18:37:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,892 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,892 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table448) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,892 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table207 2024-11-13T18:37:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,893 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,893 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,893 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table449 2024-11-13T18:37:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table449) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table208 2024-11-13T18:37:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table209 2024-11-13T18:37:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1102 2024-11-13T18:37:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1344 2024-11-13T18:37:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1103 2024-11-13T18:37:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1345 2024-11-13T18:37:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1104 2024-11-13T18:37:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1346 2024-11-13T18:37:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1105 2024-11-13T18:37:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1347 2024-11-13T18:37:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1106 2024-11-13T18:37:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1348 2024-11-13T18:37:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1107 2024-11-13T18:37:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1349 2024-11-13T18:37:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1108 2024-11-13T18:37:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table690 2024-11-13T18:37:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table690) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1109 2024-11-13T18:37:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,902 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,902 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,902 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table670 2024-11-13T18:37:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,902 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,902 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table670) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,902 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table671 2024-11-13T18:37:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,903 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table671) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,903 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table430 2024-11-13T18:37:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,905 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,905 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,905 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table672 2024-11-13T18:37:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,905 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,905 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table672) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,905 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table431 2024-11-13T18:37:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,906 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,906 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,906 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table673 2024-11-13T18:37:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,907 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,907 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table673) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,907 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table432 2024-11-13T18:37:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,907 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,907 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table432) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,907 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table674 2024-11-13T18:37:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,908 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,908 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table674) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,908 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1330 2024-11-13T18:37:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,908 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,908 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,908 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table433 2024-11-13T18:37:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,909 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,909 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table433) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,909 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table675 2024-11-13T18:37:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,909 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,909 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table675) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,909 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1331 2024-11-13T18:37:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,909 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,909 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,909 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table434 2024-11-13T18:37:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,910 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,910 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table434) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,910 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table676 2024-11-13T18:37:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,911 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,911 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table676) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,911 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1332 2024-11-13T18:37:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,911 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,911 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,911 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table435 2024-11-13T18:37:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,912 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table435) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table677 2024-11-13T18:37:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,912 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table677) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table436 2024-11-13T18:37:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,913 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,913 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table436) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,913 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table678 2024-11-13T18:37:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,913 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,913 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table678) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,913 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table437 2024-11-13T18:37:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,914 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,914 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table437) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,914 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table679 2024-11-13T18:37:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,914 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,914 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table679) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,914 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table438 2024-11-13T18:37:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,915 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table438) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,915 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table439 2024-11-13T18:37:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,916 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,916 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table439) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,916 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1333 2024-11-13T18:37:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,916 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,916 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,916 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1334 2024-11-13T18:37:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,916 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,916 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,916 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1335 2024-11-13T18:37:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,917 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1336 2024-11-13T18:37:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,917 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1337 2024-11-13T18:37:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,918 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,918 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,918 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1338 2024-11-13T18:37:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,918 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,918 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,918 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1339 2024-11-13T18:37:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,919 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,919 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,919 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table460 2024-11-13T18:37:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,919 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,919 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table460) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,919 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table461 2024-11-13T18:37:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,920 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,920 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table461) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,920 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table220 2024-11-13T18:37:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,920 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,920 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,921 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table462 2024-11-13T18:37:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,921 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,921 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table462) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,921 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table221 2024-11-13T18:37:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,922 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table463 2024-11-13T18:37:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,923 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,923 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table463) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,923 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table222 2024-11-13T18:37:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,923 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,923 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,923 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table464 2024-11-13T18:37:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,924 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,924 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table464) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table223 2024-11-13T18:37:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,924 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,924 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table465 2024-11-13T18:37:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table465) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table224 2024-11-13T18:37:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table466 2024-11-13T18:37:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,926 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,926 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table466) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,926 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1320 2024-11-13T18:37:58,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,926 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,926 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,926 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table225 2024-11-13T18:37:58,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,927 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table467 2024-11-13T18:37:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table467) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,927 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1321 2024-11-13T18:37:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,928 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,928 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,928 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table226 2024-11-13T18:37:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,928 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,928 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,928 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table468 2024-11-13T18:37:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,929 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,929 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table468) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,929 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table227 2024-11-13T18:37:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,930 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table469 2024-11-13T18:37:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,930 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table469) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table228 2024-11-13T18:37:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table229 2024-11-13T18:37:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1322 2024-11-13T18:37:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,932 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,932 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,932 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1323 2024-11-13T18:37:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,932 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,932 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,932 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1324 2024-11-13T18:37:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,933 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,933 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,933 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1325 2024-11-13T18:37:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,933 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,933 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,933 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1326 2024-11-13T18:37:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,934 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,934 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,934 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1327 2024-11-13T18:37:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,934 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,934 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,934 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1328 2024-11-13T18:37:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,935 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,935 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table470 2024-11-13T18:37:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,935 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table470) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,935 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1329 2024-11-13T18:37:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table691 2024-11-13T18:37:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table691) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table450 2024-11-13T18:37:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table450) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table692 2024-11-13T18:37:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table692) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table451 2024-11-13T18:37:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table451) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table693 2024-11-13T18:37:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table693) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table210 2024-11-13T18:37:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table452 2024-11-13T18:37:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table452) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,940 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table694 2024-11-13T18:37:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table694) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,940 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table211 2024-11-13T18:37:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table453 2024-11-13T18:37:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,941 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table453) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table695 2024-11-13T18:37:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,942 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table695) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,942 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table212 2024-11-13T18:37:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,942 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,942 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table454 2024-11-13T18:37:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table454) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table696 2024-11-13T18:37:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,944 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table696) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table213 2024-11-13T18:37:58,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,945 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table455 2024-11-13T18:37:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,945 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table455) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table697 2024-11-13T18:37:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table697) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table214 2024-11-13T18:37:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table456 2024-11-13T18:37:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table456) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table698 2024-11-13T18:37:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table698) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1310 2024-11-13T18:37:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table215 2024-11-13T18:37:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table457 2024-11-13T18:37:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table457) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table699 2024-11-13T18:37:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table699) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table216 2024-11-13T18:37:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table458 2024-11-13T18:37:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table458) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table217 2024-11-13T18:37:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table459 2024-11-13T18:37:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table459) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table218 2024-11-13T18:37:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table219 2024-11-13T18:37:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1319 2024-11-13T18:37:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,953 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1311 2024-11-13T18:37:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,953 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1312 2024-11-13T18:37:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,954 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,954 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1313 2024-11-13T18:37:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,954 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,954 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1314 2024-11-13T18:37:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1315 2024-11-13T18:37:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1316 2024-11-13T18:37:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1317 2024-11-13T18:37:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1318 2024-11-13T18:37:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table196 2024-11-13T18:37:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table197 2024-11-13T18:37:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table198 2024-11-13T18:37:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table199 2024-11-13T18:37:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table163 2024-11-13T18:37:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,961 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,961 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table164 2024-11-13T18:37:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table165 2024-11-13T18:37:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table166 2024-11-13T18:37:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,963 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table167 2024-11-13T18:37:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table168 2024-11-13T18:37:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table169 2024-11-13T18:37:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,965 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,965 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table170 2024-11-13T18:37:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,965 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,965 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table171 2024-11-13T18:37:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table172 2024-11-13T18:37:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table173 2024-11-13T18:37:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table152 2024-11-13T18:37:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table394 2024-11-13T18:37:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:58,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:58,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:58,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table153 2024-11-13T18:37:58,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:58,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:58,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:58,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:58,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:58,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:58,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table395 2024-11-13T18:37:59,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table154 2024-11-13T18:37:59,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,018 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,018 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,018 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table396 2024-11-13T18:37:59,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,018 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,018 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,018 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table155 2024-11-13T18:37:59,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,019 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,019 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,019 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table397 2024-11-13T18:37:59,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,019 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,019 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,019 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table156 2024-11-13T18:37:59,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table398 2024-11-13T18:37:59,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table157 2024-11-13T18:37:59,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table399 2024-11-13T18:37:59,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table158 2024-11-13T18:37:59,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table159 2024-11-13T18:37:59,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table160 2024-11-13T18:37:59,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,025 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,025 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table161 2024-11-13T18:37:59,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,025 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,025 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table162 2024-11-13T18:37:59,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,026 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,026 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,026 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table185 2024-11-13T18:37:59,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,026 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,026 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,026 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table186 2024-11-13T18:37:59,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table187 2024-11-13T18:37:59,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table188 2024-11-13T18:37:59,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,028 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,028 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,028 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table189 2024-11-13T18:37:59,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table190 2024-11-13T18:37:59,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table191 2024-11-13T18:37:59,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table192 2024-11-13T18:37:59,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,030 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,030 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table193 2024-11-13T18:37:59,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,030 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,030 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table194 2024-11-13T18:37:59,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table195 2024-11-13T18:37:59,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table174 2024-11-13T18:37:59,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table175 2024-11-13T18:37:59,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,032 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table176 2024-11-13T18:37:59,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,032 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table177 2024-11-13T18:37:59,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,033 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
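
Note (editor's illustration, not part of the log): the BalancerClusterState lines above describe a simple topology index, i.e. each of the six servers sits on its own host and all hosts share a single rack. The following sketch mirrors that mapping with plain arrays; it is only an illustration of the relationships printed in the log, not HBase's internal data structures, and the names are assumptions.

```java
// Sketch of the server -> host -> rack mapping reported by the
// BalancerClusterState log lines above. Not HBase source; illustrative only.
public class ClusterIndexSketch {
    public static void main(String[] args) {
        // host ids copied from the "Hosts are {...}" log line
        String[] servers = {"srv1217930446", "srv1272628633", "srv1331179858",
                            "srv1366310487", "srv380057362", "srv726429114"};
        int[] serverToHost = {0, 1, 2, 3, 4, 5}; // "server i is on host i"
        int[] serverToRack = {0, 0, 0, 0, 0, 0}; // single rack: "rack=0"

        for (int i = 0; i < servers.length; i++) {
            System.out.println("server " + i + " (" + servers[i] + ") is on host "
                + serverToHost[i] + " and rack " + serverToRack[i]);
        }
        System.out.println("Number of tables=1, number of hosts=" + servers.length
            + ", number of racks=1");
    }
}
```
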
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table178 2024-11-13T18:37:59,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,033 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table179 2024-11-13T18:37:59,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,034 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table180 2024-11-13T18:37:59,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,034 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table181 2024-11-13T18:37:59,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,035 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,035 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,035 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table182 2024-11-13T18:37:59,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,035 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,035 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,036 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table183 2024-11-13T18:37:59,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,036 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,036 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,036 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table184 2024-11-13T18:37:59,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,036 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,036 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,036 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table800 2024-11-13T18:37:59,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,037 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,037 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table800) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,037 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table801 2024-11-13T18:37:59,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,037 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,037 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table801) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,037 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table802 2024-11-13T18:37:59,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table802) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table803 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table803) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table804 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table804) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table805 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table805) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table806 2024-11-13T18:37:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table806) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table807 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table807) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table808 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table808) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table809 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,040 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table809) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,040 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table820 2024-11-13T18:37:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,040 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,040 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table820) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,040 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table821 2024-11-13T18:37:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table821) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table822 2024-11-13T18:37:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table822) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table823 2024-11-13T18:37:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table823) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,042 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table824 2024-11-13T18:37:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table824) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,042 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table825 2024-11-13T18:37:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table825) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,042 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table826 2024-11-13T18:37:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,043 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table826) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,043 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table827 2024-11-13T18:37:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,043 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table827) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,043 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table828 2024-11-13T18:37:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table828) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,044 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table829 2024-11-13T18:37:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table829) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,044 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1193 2024-11-13T18:37:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,044 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1194 2024-11-13T18:37:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,045 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,045 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,045 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1195 2024-11-13T18:37:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,045 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,045 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,045 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1196 2024-11-13T18:37:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,046 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,046 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,046 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1197 2024-11-13T18:37:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,046 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,046 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,046 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1198 2024-11-13T18:37:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,046 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,046 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,046 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1199 2024-11-13T18:37:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,047 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,047 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,047 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table810 2024-11-13T18:37:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,047 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,048 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table810) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,048 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table811 2024-11-13T18:37:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,048 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,048 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table811) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,048 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table812 2024-11-13T18:37:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table812) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table813 2024-11-13T18:37:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table813) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table814 2024-11-13T18:37:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table814) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1190 2024-11-13T18:37:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,050 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table815 2024-11-13T18:37:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table815) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,050 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1191 2024-11-13T18:37:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,051 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,051 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,051 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table816 2024-11-13T18:37:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,051 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,051 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table816) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,051 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1192 2024-11-13T18:37:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,052 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,052 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,052 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table817 2024-11-13T18:37:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,052 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,052 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table817) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,052 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table818 2024-11-13T18:37:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,053 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,053 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table818) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,053 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table819 2024-11-13T18:37:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,053 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,053 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table819) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,053 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1260 2024-11-13T18:37:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,054 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,054 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,054 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1261 2024-11-13T18:37:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,054 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,054 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,054 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table760 2024-11-13T18:37:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,055 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,055 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table760) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,055 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1020 2024-11-13T18:37:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,055 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,055 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1020) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,055 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1262 2024-11-13T18:37:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,056 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,056 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,056 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table761 2024-11-13T18:37:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,056 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,056 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table761) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,056 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1021 2024-11-13T18:37:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,056 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,056 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1021) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,056 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1263 2024-11-13T18:37:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,057 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,057 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,057 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table520 2024-11-13T18:37:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,057 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,057 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table520) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,057 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table762 2024-11-13T18:37:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,057 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,057 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table762) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,057 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1022 2024-11-13T18:37:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,058 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1022) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,058 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1264 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,058 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,058 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table521 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,058 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table521) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,058 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table763 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,059 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table763) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,059 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1023 2024-11-13T18:37:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,059 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1023) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,059 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1265 2024-11-13T18:37:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,059 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,059 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table522 2024-11-13T18:37:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,060 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,060 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table522) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,060 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table764 2024-11-13T18:37:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,060 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,060 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table764) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,060 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1024 2024-11-13T18:37:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,061 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,061 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1024) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,061 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1266 2024-11-13T18:37:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,061 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,061 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,061 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table523 2024-11-13T18:37:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,061 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,061 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table523) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,061 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table765 2024-11-13T18:37:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,062 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,062 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table765) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,062 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table524 2024-11-13T18:37:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,062 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,062 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table524) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,062 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table766 2024-11-13T18:37:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,063 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table766) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,063 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table525 2024-11-13T18:37:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,063 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table525) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,063 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table767 2024-11-13T18:37:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,063 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table767) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,063 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table526 2024-11-13T18:37:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,064 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,064 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table526) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,064 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table768 2024-11-13T18:37:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,064 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,064 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table768) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,064 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table527 2024-11-13T18:37:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,065 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,065 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table527) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,065 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table769 2024-11-13T18:37:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,065 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,065 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table769) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,065 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table528 2024-11-13T18:37:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,066 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,066 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table528) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table529 2024-11-13T18:37:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,066 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,066 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table529) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-13T18:37:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-13T18:37:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-13T18:37:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,068 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,068 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-13T18:37:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,068 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,068 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1025 2024-11-13T18:37:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,069 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,069 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1025) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,069 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1267 2024-11-13T18:37:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,069 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,069 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,069 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-13T18:37:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,070 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,070 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,070 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1026 2024-11-13T18:37:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,070 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,070 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1026) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,070 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1268 2024-11-13T18:37:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,071 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,071 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-13T18:37:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,071 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,071 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1027 2024-11-13T18:37:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,072 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,072 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1027) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,072 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1269 2024-11-13T18:37:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,072 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,072 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,072 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-13T18:37:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,072 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,072 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,072 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1028 2024-11-13T18:37:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1028) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,073 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-13T18:37:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,073 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1029 2024-11-13T18:37:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,074 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1029) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-13T18:37:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-13T18:37:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1490 2024-11-13T18:37:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,076 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1490) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1491 2024-11-13T18:37:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,076 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1491) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table990 2024-11-13T18:37:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,076 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table990) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1250 2024-11-13T18:37:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,077 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1492 2024-11-13T18:37:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,077 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1492) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table991 2024-11-13T18:37:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,078 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,078 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table991) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1251 2024-11-13T18:37:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,078 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,078 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1493 2024-11-13T18:37:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,079 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,079 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1493) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,079 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table750 2024-11-13T18:37:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,079 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,079 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table750) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,079 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table992 2024-11-13T18:37:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,079 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,079 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table992) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,079 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1010 2024-11-13T18:37:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,080 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,080 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1010) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1252 2024-11-13T18:37:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,080 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,080 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1494 2024-11-13T18:37:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1494) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table751 2024-11-13T18:37:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table751) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table993 2024-11-13T18:37:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,082 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,082 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table993) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,082 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1011 2024-11-13T18:37:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,082 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,082 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1011) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,082 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1253 2024-11-13T18:37:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,082 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,082 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,082 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1495 2024-11-13T18:37:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1495) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table510 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table510) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table752 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table752) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table994 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,084 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table994) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,084 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1012 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,084 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1012) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,084 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1254 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,084 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,084 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1496 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,084 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1496) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,084 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table511 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,085 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,085 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table511) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,085 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table753 2024-11-13T18:37:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,085 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,085 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table753) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,085 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table995 2024-11-13T18:37:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,086 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,086 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table995) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,086 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1013 2024-11-13T18:37:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,086 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,086 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1013) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,086 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1255 2024-11-13T18:37:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,087 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,087 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,087 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1497 2024-11-13T18:37:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,087 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,087 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1497) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,087 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table512 2024-11-13T18:37:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,088 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,088 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table512) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,088 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table754 2024-11-13T18:37:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,088 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,088 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table754) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,088 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table996 2024-11-13T18:37:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,088 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table996) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,089 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table513 2024-11-13T18:37:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,089 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table513) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,089 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table755 2024-11-13T18:37:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,089 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table755) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,089 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table997 2024-11-13T18:37:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,090 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,090 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table997) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,090 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table514 2024-11-13T18:37:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,090 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,090 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table514) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,090 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table756 2024-11-13T18:37:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table756) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table998 2024-11-13T18:37:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table998) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table515 2024-11-13T18:37:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,092 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table515) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,092 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table757 2024-11-13T18:37:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,092 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table757) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,092 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table999 2024-11-13T18:37:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,092 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table999) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,092 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table516 2024-11-13T18:37:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table516) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table758 2024-11-13T18:37:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table758) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table517 2024-11-13T18:37:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,094 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table517) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,094 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table759 2024-11-13T18:37:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,094 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table759) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,094 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table518 2024-11-13T18:37:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table518) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table519 2024-11-13T18:37:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table519) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-13T18:37:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-13T18:37:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,096 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-13T18:37:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,096 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1014 2024-11-13T18:37:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1014) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1256 2024-11-13T18:37:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,098 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,098 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,098 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1498 2024-11-13T18:37:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,098 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,098 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1498) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,098 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-13T18:37:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,099 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1015 2024-11-13T18:37:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1015) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,099 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1257 2024-11-13T18:37:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,099 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1499 2024-11-13T18:37:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,100 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1499) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-13T18:37:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1016 2024-11-13T18:37:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1016) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1258 2024-11-13T18:37:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-13T18:37:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,102 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,102 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1017 2024-11-13T18:37:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,102 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1017) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,102 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1259 2024-11-13T18:37:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,103 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-13T18:37:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,103 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1018 2024-11-13T18:37:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1018) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-13T18:37:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,106 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,106 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,106 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1019 2024-11-13T18:37:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,106 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,106 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1019) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,106 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-13T18:37:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,107 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,107 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-13T18:37:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,107 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,107 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-13T18:37:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table780 2024-11-13T18:37:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table780) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1480 2024-11-13T18:37:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1480) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table781 2024-11-13T18:37:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table781) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1481 2024-11-13T18:37:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1481) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,110 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table540 2024-11-13T18:37:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table540) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,110 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table782 2024-11-13T18:37:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table782) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1240 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1482 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1482) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table541 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table541) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table783 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table783) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1241 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1483 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1483) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table300 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table542 2024-11-13T18:37:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table542) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table784 2024-11-13T18:37:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,114 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,114 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table784) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,114 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1000 2024-11-13T18:37:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,114 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,114 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1000) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,114 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1242 2024-11-13T18:37:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,114 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,114 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,114 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1484 2024-11-13T18:37:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1484) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,115 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table301 2024-11-13T18:37:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,115 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table543 2024-11-13T18:37:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table543) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table785 2024-11-13T18:37:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table785) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1001 2024-11-13T18:37:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1001) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1243 2024-11-13T18:37:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1485 2024-11-13T18:37:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1485) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,118 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table302 2024-11-13T18:37:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,118 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table544 2024-11-13T18:37:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table544) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,118 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table786 2024-11-13T18:37:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table786) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1002 2024-11-13T18:37:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1002) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1244 2024-11-13T18:37:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,120 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1486 2024-11-13T18:37:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,120 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1486) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table303 2024-11-13T18:37:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table545 2024-11-13T18:37:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table545) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table787 2024-11-13T18:37:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table787) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table304 2024-11-13T18:37:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,122 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table546 2024-11-13T18:37:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,122 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table546) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,123 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table788 2024-11-13T18:37:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table788) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,123 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table305 2024-11-13T18:37:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,123 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table547 2024-11-13T18:37:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,124 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table547) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table789 2024-11-13T18:37:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,124 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table789) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table306 2024-11-13T18:37:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table548 2024-11-13T18:37:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table548) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table307 2024-11-13T18:37:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table549 2024-11-13T18:37:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table549) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table308 2024-11-13T18:37:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table309 2024-11-13T18:37:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,127 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-13T18:37:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,127 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-13T18:37:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,128 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,128 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,128 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1003 2024-11-13T18:37:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,128 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,128 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1003) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,128 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1245 2024-11-13T18:37:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1487 2024-11-13T18:37:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1487) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-13T18:37:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1004 2024-11-13T18:37:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1004) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1246 2024-11-13T18:37:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1488 2024-11-13T18:37:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1488) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-13T18:37:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1005 2024-11-13T18:37:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1005) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1247 2024-11-13T18:37:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1489 2024-11-13T18:37:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1489) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-13T18:37:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,133 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1006 2024-11-13T18:37:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,133 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1006) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1248 2024-11-13T18:37:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-13T18:37:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1007 2024-11-13T18:37:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,135 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1007) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,135 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1249 2024-11-13T18:37:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,135 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,135 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-13T18:37:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,140 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,140 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1008 2024-11-13T18:37:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,140 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,140 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1008) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,140 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-13T18:37:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1009 2024-11-13T18:37:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1009) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-13T18:37:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,142 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-13T18:37:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,142 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-13T18:37:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table770 2024-11-13T18:37:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table770) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1470 2024-11-13T18:37:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1470) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table771 2024-11-13T18:37:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table771) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1471 2024-11-13T18:37:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1471) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table530 2024-11-13T18:37:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table530) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table772 2024-11-13T18:37:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table772) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1230 2024-11-13T18:37:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1472 2024-11-13T18:37:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,146 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,146 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1472) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,147 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table531 2024-11-13T18:37:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,147 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,147 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table531) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,147 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table773 2024-11-13T18:37:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,147 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,147 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table773) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,147 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1231 2024-11-13T18:37:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1473 2024-11-13T18:37:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1473) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table532 2024-11-13T18:37:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table532) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table774 2024-11-13T18:37:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table774) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1232 2024-11-13T18:37:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1474 2024-11-13T18:37:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1474) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table533 2024-11-13T18:37:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table533) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table775 2024-11-13T18:37:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table775) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1233 2024-11-13T18:37:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,151 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1475 2024-11-13T18:37:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1475) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,151 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table534 2024-11-13T18:37:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table534) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,151 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table776 2024-11-13T18:37:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table776) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table535 2024-11-13T18:37:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table535) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table777 2024-11-13T18:37:59,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table777) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table536 2024-11-13T18:37:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table536) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table778 2024-11-13T18:37:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table778) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table537 2024-11-13T18:37:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table537) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table779 2024-11-13T18:37:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table779) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table538 2024-11-13T18:37:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table538) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table539 2024-11-13T18:37:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table539) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-13T18:37:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,157 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1234 2024-11-13T18:37:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,157 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1476 2024-11-13T18:37:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1476) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-13T18:37:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1235 2024-11-13T18:37:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1477 2024-11-13T18:37:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,159 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1477) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,159 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-13T18:37:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,159 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,159 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1236 2024-11-13T18:37:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,159 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,159 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1478 2024-11-13T18:37:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,161 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,161 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1478) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,161 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-13T18:37:59,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,161 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,161 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,161 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1237 2024-11-13T18:37:59,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,161 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,161 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,161 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1479 2024-11-13T18:37:59,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1479) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-13T18:37:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1238 2024-11-13T18:37:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,163 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,163 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-13T18:37:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,163 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,163 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1239 2024-11-13T18:37:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,163 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,163 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-13T18:37:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,163 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-13T18:37:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,164 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1061 2024-11-13T18:37:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,164 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1061) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1062 2024-11-13T18:37:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1062) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,165 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1063 2024-11-13T18:37:59,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1063) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,165 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1064 2024-11-13T18:37:59,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,166 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1064) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1065 2024-11-13T18:37:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,166 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1065) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table960 2024-11-13T18:37:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,167 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,167 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table960) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,167 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1066 2024-11-13T18:37:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1066) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table961 2024-11-13T18:37:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table961) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1067 2024-11-13T18:37:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1067) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table720 2024-11-13T18:37:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table720) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,169 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table962 2024-11-13T18:37:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table962) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,169 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1068 2024-11-13T18:37:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1068) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,169 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table721 2024-11-13T18:37:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,170 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table721) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,170 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table963 2024-11-13T18:37:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,170 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table963) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,170 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table722 2024-11-13T18:37:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table722) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table964 2024-11-13T18:37:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table964) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table723 2024-11-13T18:37:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table723) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table965 2024-11-13T18:37:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table965) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table724 2024-11-13T18:37:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table724) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table966 2024-11-13T18:37:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table966) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,173 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table725 2024-11-13T18:37:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table725) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,173 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table967 2024-11-13T18:37:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table967) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,173 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table726 2024-11-13T18:37:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,174 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,174 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table726) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,174 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table968 2024-11-13T18:37:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,174 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,174 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table968) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,174 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table727 2024-11-13T18:37:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,174 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table727) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,175 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table969 2024-11-13T18:37:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,175 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table969) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,175 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table728 2024-11-13T18:37:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,175 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table728) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,175 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1060 2024-11-13T18:37:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,175 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1060) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,175 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table729 2024-11-13T18:37:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,176 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,176 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table729) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,176 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1069 2024-11-13T18:37:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,176 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,176 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1069) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,176 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1050 2024-11-13T18:37:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,177 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,177 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1050) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,177 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1292 2024-11-13T18:37:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,177 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,177 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,177 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1051 2024-11-13T18:37:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,178 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,178 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1051) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,178 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1293 2024-11-13T18:37:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,178 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,178 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,178 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1052 2024-11-13T18:37:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,178 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,178 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1052) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,178 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1294 2024-11-13T18:37:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,179 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,179 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,179 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1053 2024-11-13T18:37:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,179 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,179 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1053) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,179 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1295 2024-11-13T18:37:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,180 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,180 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,180 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1054 2024-11-13T18:37:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,180 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,180 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1054) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,180 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1296 2024-11-13T18:37:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,181 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,181 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,181 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1055 2024-11-13T18:37:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,181 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,181 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1055) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,181 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1297 2024-11-13T18:37:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,181 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,181 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,181 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table950 2024-11-13T18:37:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,182 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,182 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table950) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,182 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1056 2024-11-13T18:37:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,182 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,182 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1056) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,182 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1298 2024-11-13T18:37:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,183 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,183 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,183 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table951 2024-11-13T18:37:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,183 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,183 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table951) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,183 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1057 2024-11-13T18:37:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,183 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,183 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1057) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,183 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1299 2024-11-13T18:37:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,184 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,184 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,184 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table710 2024-11-13T18:37:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,184 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,184 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table710) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,184 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table952 2024-11-13T18:37:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,185 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,185 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table952) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,185 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table711 2024-11-13T18:37:59,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,185 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,185 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table711) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,185 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table953 2024-11-13T18:37:59,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,186 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,186 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table953) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,186 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table712 2024-11-13T18:37:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,186 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,186 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table712) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,186 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table954 2024-11-13T18:37:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,187 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,187 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table954) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,187 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table713 2024-11-13T18:37:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,187 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,187 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table713) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,187 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table955 2024-11-13T18:37:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,188 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,188 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table955) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,188 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table714 2024-11-13T18:37:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,188 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,188 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table714) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,188 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table956 2024-11-13T18:37:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,188 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,188 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table956) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,188 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table715 2024-11-13T18:37:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,189 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,189 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table715) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,189 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table957 2024-11-13T18:37:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,189 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,189 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table957) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,189 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table716 2024-11-13T18:37:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,190 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table716) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,190 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table958 2024-11-13T18:37:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,190 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table958) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,190 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1290 2024-11-13T18:37:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,191 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table717 2024-11-13T18:37:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table717) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,191 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table959 2024-11-13T18:37:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table959) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,192 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1291 2024-11-13T18:37:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,192 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,192 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,192 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table718 2024-11-13T18:37:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,193 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table718) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,193 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table719 2024-11-13T18:37:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,193 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table719) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,193 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1058 2024-11-13T18:37:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,193 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1058) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,193 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1059 2024-11-13T18:37:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,194 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,194 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1059) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,194 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1281 2024-11-13T18:37:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,194 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,194 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,194 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1040 2024-11-13T18:37:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,195 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,195 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1040) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,195 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1282 2024-11-13T18:37:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,195 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,195 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,195 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1041 2024-11-13T18:37:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,195 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,196 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1041) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,196 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1283 2024-11-13T18:37:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,196 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,196 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,196 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table980 2024-11-13T18:37:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,196 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,196 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table980) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,196 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1042 2024-11-13T18:37:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,197 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1042) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1284 2024-11-13T18:37:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,197 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table981 2024-11-13T18:37:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,197 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table981) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1043 2024-11-13T18:37:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,197 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1043) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,197 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1285 2024-11-13T18:37:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,198 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,198 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,198 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table740 2024-11-13T18:37:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,198 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,198 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table740) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,198 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table982 2024-11-13T18:37:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,199 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,199 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table982) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,199 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1044 2024-11-13T18:37:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,199 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,199 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1044) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,199 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1286 2024-11-13T18:37:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,199 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,199 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,199 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table741 2024-11-13T18:37:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table741) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table983 2024-11-13T18:37:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table983) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1045 2024-11-13T18:37:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1045) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1287 2024-11-13T18:37:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,201 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table500 2024-11-13T18:37:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,201 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table500) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table742 2024-11-13T18:37:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,202 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table742) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,202 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table984 2024-11-13T18:37:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,202 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table984) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,202 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1046 2024-11-13T18:37:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1046) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1288 2024-11-13T18:37:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table501 2024-11-13T18:37:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table501) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table743 2024-11-13T18:37:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,204 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,204 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table743) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,204 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table985 2024-11-13T18:37:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,204 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,204 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table985) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,204 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table502 2024-11-13T18:37:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table502) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table744 2024-11-13T18:37:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table744) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table986 2024-11-13T18:37:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,206 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,206 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table986) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,206 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table503 2024-11-13T18:37:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,206 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,206 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table503) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,206 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table745 2024-11-13T18:37:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,206 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,207 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table745) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,207 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table987 2024-11-13T18:37:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,207 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,207 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table987) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,207 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table504 2024-11-13T18:37:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,207 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,207 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table504) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,207 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table746 2024-11-13T18:37:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,208 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,208 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table746) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,208 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table988 2024-11-13T18:37:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,208 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,208 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table988) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,208 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table505 2024-11-13T18:37:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table505) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table747 2024-11-13T18:37:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table747) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table989 2024-11-13T18:37:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table989) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table506 2024-11-13T18:37:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table506) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table748 2024-11-13T18:37:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table748) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table507 2024-11-13T18:37:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table507) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table749 2024-11-13T18:37:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,211 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table749) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,211 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table508 2024-11-13T18:37:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,211 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table508) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,211 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1280 2024-11-13T18:37:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table509 2024-11-13T18:37:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table509) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1047 2024-11-13T18:37:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1047) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1289 2024-11-13T18:37:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,213 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,213 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,213 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1048 2024-11-13T18:37:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,213 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,213 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1048) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,213 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1049 2024-11-13T18:37:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,213 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,213 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1049) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,213 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1270 2024-11-13T18:37:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1271 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1030 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1030) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1272 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1031 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,215 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1031) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,215 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1273 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,215 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,215 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table970 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,215 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table970) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,215 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1032 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,216 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,216 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1032) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,216 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1274 2024-11-13T18:37:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,216 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,216 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,216 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table971 2024-11-13T18:37:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,216 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table971) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1033 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1033) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1275 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table730 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table730) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table972 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,217 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table972) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1034 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1034) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,218 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1276 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,218 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table731 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table731) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,218 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table973 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table973) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1035 2024-11-13T18:37:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1035) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1277 2024-11-13T18:37:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table732 2024-11-13T18:37:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table732) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table974 2024-11-13T18:37:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table974) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table733 2024-11-13T18:37:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table733) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table975 2024-11-13T18:37:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table975) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table734 2024-11-13T18:37:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table734) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table976 2024-11-13T18:37:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table976) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table735 2024-11-13T18:37:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table735) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table977 2024-11-13T18:37:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table977) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table736 2024-11-13T18:37:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table736) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table978 2024-11-13T18:37:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table978) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table737 2024-11-13T18:37:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table737) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table979 2024-11-13T18:37:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table979) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table738 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table738) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table739 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table739) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1036 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1036) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,225 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1278 2024-11-13T18:37:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,226 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,226 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,226 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1037 2024-11-13T18:37:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,226 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,226 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1037) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,226 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1279 2024-11-13T18:37:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,226 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,226 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,226 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1038 2024-11-13T18:37:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,227 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1038) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,227 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1039 2024-11-13T18:37:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,227 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1039) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,227 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-13T18:37:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,227 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,227 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table361 2024-11-13T18:37:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,228 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,228 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,228 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-13T18:37:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,228 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,228 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,228 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-13T18:37:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,229 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,229 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,229 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table362 2024-11-13T18:37:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,229 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,229 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,229 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-13T18:37:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,230 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,230 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,230 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-13T18:37:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,230 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,230 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,230 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table363 2024-11-13T18:37:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,231 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-13T18:37:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,231 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-13T18:37:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,232 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table364 2024-11-13T18:37:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,232 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-13T18:37:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,233 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,233 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-13T18:37:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,233 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,233 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table365 2024-11-13T18:37:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,234 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,234 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,234 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-13T18:37:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,234 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,234 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,234 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-13T18:37:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,235 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,235 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,235 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table366 2024-11-13T18:37:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,235 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,235 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,235 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-13T18:37:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,236 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,236 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-13T18:37:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,236 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,236 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table367 2024-11-13T18:37:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,237 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,237 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,237 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-13T18:37:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,237 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,237 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,237 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table368 2024-11-13T18:37:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,238 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,238 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,238 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1420 2024-11-13T18:37:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,238 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,238 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,238 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-13T18:37:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,239 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table369 2024-11-13T18:37:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,239 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-13T18:37:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,240 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,240 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,240 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-13T18:37:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,240 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,240 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,240 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1429 2024-11-13T18:37:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,241 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,241 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,241 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1421 2024-11-13T18:37:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,241 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,241 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,241 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1422 2024-11-13T18:37:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,242 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,242 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,242 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1423 2024-11-13T18:37:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,242 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,242 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,242 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1424 2024-11-13T18:37:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,242 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,242 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,242 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1425 2024-11-13T18:37:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,243 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,243 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,243 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-13T18:37:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,243 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,243 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,243 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1426 2024-11-13T18:37:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,244 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,244 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-13T18:37:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,244 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,244 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table370 2024-11-13T18:37:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,245 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,245 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,245 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1427 2024-11-13T18:37:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,245 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,245 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,245 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-13T18:37:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,246 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,246 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,246 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table371 2024-11-13T18:37:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,246 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,246 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,246 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1428 2024-11-13T18:37:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,247 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,247 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,247 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table350 2024-11-13T18:37:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,247 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,247 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,247 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table592 2024-11-13T18:37:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,248 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,248 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table592) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,248 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table351 2024-11-13T18:37:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,248 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,248 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,248 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table593 2024-11-13T18:37:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,249 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,249 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table593) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,249 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-13T18:37:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,249 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,249 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,249 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table352 2024-11-13T18:37:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,250 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,250 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,250 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table594 2024-11-13T18:37:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,250 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,250 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table594) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,250 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-13T18:37:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,250 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,250 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,250 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table353 2024-11-13T18:37:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,251 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,251 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,251 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table595 2024-11-13T18:37:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,251 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,251 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table595) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,251 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-13T18:37:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,251 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,251 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,251 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table354 2024-11-13T18:37:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,252 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,252 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,252 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table596 2024-11-13T18:37:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,252 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,252 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table596) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,252 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-13T18:37:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table355 2024-11-13T18:37:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table597 2024-11-13T18:37:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,254 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,254 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table597) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,254 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-13T18:37:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,254 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,254 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,254 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table356 2024-11-13T18:37:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table598 2024-11-13T18:37:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table598) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-13T18:37:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,256 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table357 2024-11-13T18:37:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table599 2024-11-13T18:37:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table599) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-13T18:37:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table358 2024-11-13T18:37:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-13T18:37:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,259 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table359 2024-11-13T18:37:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,259 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-13T18:37:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,261 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,261 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,261 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-13T18:37:59,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1418 2024-11-13T18:37:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1419 2024-11-13T18:37:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1410 2024-11-13T18:37:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1411 2024-11-13T18:37:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,264 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,264 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1412 2024-11-13T18:37:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,264 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,264 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1413 2024-11-13T18:37:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,265 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,265 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1414 2024-11-13T18:37:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,265 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,265 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1415 2024-11-13T18:37:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1416 2024-11-13T18:37:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,266 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table360 2024-11-13T18:37:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,266 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,267 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,267 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1417 2024-11-13T18:37:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,267 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,267 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table141 2024-11-13T18:37:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,267 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,267 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table383 2024-11-13T18:37:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table142 2024-11-13T18:37:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,269 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table384 2024-11-13T18:37:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,269 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,269 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,269 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table143 2024-11-13T18:37:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,269 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,269 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table385 2024-11-13T18:37:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,270 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table144 2024-11-13T18:37:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,270 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table386 2024-11-13T18:37:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,271 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table145 2024-11-13T18:37:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,271 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table387 2024-11-13T18:37:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table146 2024-11-13T18:37:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,273 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table388 2024-11-13T18:37:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,273 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table147 2024-11-13T18:37:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table389 2024-11-13T18:37:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table148 2024-11-13T18:37:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,275 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,275 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table149 2024-11-13T18:37:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,275 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,275 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,275 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:37:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,275 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,275 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,276 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1407 2024-11-13T18:37:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,276 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,276 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,276 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:37:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,276 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,276 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1408 2024-11-13T18:37:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:37:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1409 2024-11-13T18:37:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,278 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,278 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,278 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:37:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,278 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,278 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,278 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:37:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,279 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,279 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1400 2024-11-13T18:37:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,280 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,280 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,280 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1401 2024-11-13T18:37:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,280 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,280 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,280 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1402 2024-11-13T18:37:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-13T18:37:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table390 2024-11-13T18:37:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,282 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,282 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,282 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1403 2024-11-13T18:37:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,282 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,282 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,282 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-13T18:37:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,283 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,283 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,283 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table391 2024-11-13T18:37:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,283 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,283 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,283 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1404 2024-11-13T18:37:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,283 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,284 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,284 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-13T18:37:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,284 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,284 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,284 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table150 2024-11-13T18:37:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,284 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,285 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table392 2024-11-13T18:37:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,285 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,285 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1405 2024-11-13T18:37:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,286 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,286 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,286 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-13T18:37:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,286 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,286 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,286 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table151 2024-11-13T18:37:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,287 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,287 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,287 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table393 2024-11-13T18:37:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,287 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,287 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,287 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1406 2024-11-13T18:37:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,288 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,288 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,288 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table130 2024-11-13T18:37:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,288 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,288 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,288 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table372 2024-11-13T18:37:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,289 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table131 2024-11-13T18:37:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,289 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table373 2024-11-13T18:37:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,290 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,290 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,290 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table132 2024-11-13T18:37:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,290 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,290 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,290 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table374 2024-11-13T18:37:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,291 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,291 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,291 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table133 2024-11-13T18:37:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,291 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,291 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,291 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table375 2024-11-13T18:37:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,292 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,292 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,292 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table134 2024-11-13T18:37:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,292 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,292 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,292 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table376 2024-11-13T18:37:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,293 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,293 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table135 2024-11-13T18:37:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,293 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,293 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table377 2024-11-13T18:37:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,293 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,293 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table136 2024-11-13T18:37:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,294 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,294 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table378 2024-11-13T18:37:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,294 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,294 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table137 2024-11-13T18:37:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,295 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,295 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,295 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table379 2024-11-13T18:37:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,295 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,295 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,295 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table138 2024-11-13T18:37:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,296 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,296 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,296 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table139 2024-11-13T18:37:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,296 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,296 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,296 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table380 2024-11-13T18:37:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,296 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,296 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,296 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table381 2024-11-13T18:37:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,297 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,297 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,297 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table140 2024-11-13T18:37:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,297 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,297 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,297 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table382 2024-11-13T18:37:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,298 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,298 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,298 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-13T18:37:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,298 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,298 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,298 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-13T18:37:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,299 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,299 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table560 2024-11-13T18:37:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,299 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,299 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table560) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-13T18:37:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table561 2024-11-13T18:37:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table561) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table320 2024-11-13T18:37:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table562 2024-11-13T18:37:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,301 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,301 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table562) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,301 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1460 2024-11-13T18:37:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,301 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,302 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1460) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,302 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table321 2024-11-13T18:37:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,302 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,302 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,302 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table563 2024-11-13T18:37:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,302 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,302 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table563) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,302 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1461 2024-11-13T18:37:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1461) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table322 2024-11-13T18:37:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table564 2024-11-13T18:37:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table564) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1220 2024-11-13T18:37:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1462 2024-11-13T18:37:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,304 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1462) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table323 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,304 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table565 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,304 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table565) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1221 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1463 2024-11-13T18:37:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1463) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table324 2024-11-13T18:37:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table566 2024-11-13T18:37:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table566) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1222 2024-11-13T18:37:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,306 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,306 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,306 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1464 2024-11-13T18:37:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,306 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,306 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1464) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,306 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table325 2024-11-13T18:37:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,307 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,307 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,307 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table567 2024-11-13T18:37:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,307 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,307 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table567) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,307 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table326 2024-11-13T18:37:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,308 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,308 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,308 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table568 2024-11-13T18:37:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,308 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,308 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table568) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,308 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table327 2024-11-13T18:37:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,309 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,309 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table569 2024-11-13T18:37:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,309 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table569) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,309 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table328 2024-11-13T18:37:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,309 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,309 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table329 2024-11-13T18:37:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,310 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,310 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,310 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1223 2024-11-13T18:37:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,310 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,310 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,310 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1465 2024-11-13T18:37:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1465) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,311 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-13T18:37:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,311 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1224 2024-11-13T18:37:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,312 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,312 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,312 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1466 2024-11-13T18:37:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,312 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,312 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1466) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,312 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-13T18:37:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,312 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,312 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,313 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1225 2024-11-13T18:37:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,313 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,313 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,313 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1467 2024-11-13T18:37:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,313 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,313 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1467) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,314 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-13T18:37:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,314 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,314 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,314 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1226 2024-11-13T18:37:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,314 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,314 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,314 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1468 2024-11-13T18:37:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,315 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,315 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1468) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,315 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-13T18:37:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,315 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,315 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,315 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1227 2024-11-13T18:37:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,316 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,316 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1469 2024-11-13T18:37:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,316 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1469) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,316 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-13T18:37:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,316 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,317 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1228 2024-11-13T18:37:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,317 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,317 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,317 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-13T18:37:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,317 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,317 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,317 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1229 2024-11-13T18:37:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table790 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table790) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,318 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-13T18:37:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,319 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,319 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,319 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table791 2024-11-13T18:37:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,319 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,319 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table791) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,319 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-13T18:37:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,319 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,319 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table550 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table550) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table792 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table792) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table551 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table551) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table793 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table793) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table310 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table552 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table552) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table794 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,322 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,322 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table794) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,322 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1450 2024-11-13T18:37:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,322 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,322 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1450) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,322 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table311 2024-11-13T18:37:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,322 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,323 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table553 2024-11-13T18:37:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table553) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,323 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table795 2024-11-13T18:37:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table795) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,323 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1451 2024-11-13T18:37:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1451) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table312 2024-11-13T18:37:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table554 2024-11-13T18:37:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table554) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table796 2024-11-13T18:37:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table796) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1210 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1452 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1452) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table313 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,326 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table555 2024-11-13T18:37:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table555) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table797 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table797) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1211 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1453 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1453) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table314 2024-11-13T18:37:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,328 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,328 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,328 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table556 2024-11-13T18:37:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,328 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,328 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table556) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,328 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table798 2024-11-13T18:37:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,328 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,328 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table798) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,329 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table315 2024-11-13T18:37:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,329 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,329 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table557 2024-11-13T18:37:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,329 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table557) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,329 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table799 2024-11-13T18:37:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table799) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table316 2024-11-13T18:37:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table558 2024-11-13T18:37:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,331 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table558) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,331 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table317 2024-11-13T18:37:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,331 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,331 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table559 2024-11-13T18:37:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,331 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table559) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,331 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table318 2024-11-13T18:37:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,332 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,332 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,332 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table319 2024-11-13T18:37:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,332 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,332 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,332 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1212 2024-11-13T18:37:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,333 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,333 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,333 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1454 2024-11-13T18:37:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,333 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,333 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1454) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,333 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1213 2024-11-13T18:37:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,333 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,333 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,334 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1455 2024-11-13T18:37:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,334 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,334 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1455) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,334 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-13T18:37:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,334 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,334 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,335 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1214 2024-11-13T18:37:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,335 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,335 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,335 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1456 2024-11-13T18:37:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,335 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,335 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1456) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,335 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-13T18:37:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,336 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,336 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,336 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1215 2024-11-13T18:37:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,336 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,336 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1457 2024-11-13T18:37:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,337 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,337 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1457) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-13T18:37:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,337 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,337 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1216 2024-11-13T18:37:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,338 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,338 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,338 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1458 2024-11-13T18:37:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,338 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,338 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1458) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,338 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-13T18:37:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,339 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1217 2024-11-13T18:37:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,339 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1459 2024-11-13T18:37:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1459) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,340 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-13T18:37:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,340 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1218 2024-11-13T18:37:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-13T18:37:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1219 2024-11-13T18:37:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-13T18:37:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,342 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,342 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,342 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table581 2024-11-13T18:37:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,342 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,342 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table581) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,342 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-13T18:37:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,343 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,343 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,343 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table340 2024-11-13T18:37:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,344 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,344 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,344 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table582 2024-11-13T18:37:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,344 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,344 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table582) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,344 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-13T18:37:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,344 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,344 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,344 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table341 2024-11-13T18:37:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,345 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,345 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,345 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table583 2024-11-13T18:37:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,345 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,345 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table583) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,345 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-13T18:37:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,346 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,346 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,346 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-13T18:37:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,346 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,346 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,346 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table342 2024-11-13T18:37:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,347 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,347 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table584 2024-11-13T18:37:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,347 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table584) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,347 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-13T18:37:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,348 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,348 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-13T18:37:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,348 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,348 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table343 2024-11-13T18:37:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,349 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,349 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,349 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table585 2024-11-13T18:37:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,349 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,349 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table585) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,349 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-13T18:37:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,350 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,350 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,350 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table344 2024-11-13T18:37:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,350 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,350 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,350 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table586 2024-11-13T18:37:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table586) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1440 2024-11-13T18:37:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1440) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-13T18:37:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,352 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,352 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,352 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table345 2024-11-13T18:37:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,352 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,352 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,352 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table587 2024-11-13T18:37:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,353 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,353 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table587) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,353 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1441 2024-11-13T18:37:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,353 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,353 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1441) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,353 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-13T18:37:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,354 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,354 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,354 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table346 2024-11-13T18:37:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,354 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,354 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,354 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table588 2024-11-13T18:37:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,354 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,354 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table588) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,355 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1200 2024-11-13T18:37:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,355 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,355 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,355 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1442 2024-11-13T18:37:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,355 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,355 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1442) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,355 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-13T18:37:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,356 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,356 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,356 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table347 2024-11-13T18:37:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,356 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,356 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,356 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table589 2024-11-13T18:37:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,357 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,357 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table589) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,357 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-13T18:37:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,357 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,357 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,357 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table348 2024-11-13T18:37:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,358 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,358 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,358 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-13T18:37:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,358 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,358 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,358 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table349 2024-11-13T18:37:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,359 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,359 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,359 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-13T18:37:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,359 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,359 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,359 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-13T18:37:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,360 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,360 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,360 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1209 2024-11-13T18:37:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,360 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,360 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,360 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1201 2024-11-13T18:37:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,361 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,361 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,361 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1443 2024-11-13T18:37:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,361 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,361 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1443) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,361 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1202 2024-11-13T18:37:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,361 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,361 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,361 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1444 2024-11-13T18:37:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,362 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,362 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1444) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,362 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1203 2024-11-13T18:37:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,362 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,362 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,362 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1445 2024-11-13T18:37:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,363 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,363 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1445) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,363 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-13T18:37:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,363 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,363 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,363 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1204 2024-11-13T18:37:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,364 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,364 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,364 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1446 2024-11-13T18:37:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,364 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,364 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1446) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,364 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-13T18:37:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,365 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,365 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,365 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1205 2024-11-13T18:37:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,365 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,365 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,365 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1447 2024-11-13T18:37:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,366 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,366 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1447) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,366 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-13T18:37:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,366 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,366 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,366 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1206 2024-11-13T18:37:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,367 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,367 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,367 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1448 2024-11-13T18:37:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,367 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,367 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1448) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,367 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-13T18:37:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,368 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,368 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,368 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table590 2024-11-13T18:37:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,368 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,368 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table590) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,368 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1207 2024-11-13T18:37:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,368 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,368 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,368 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1449 2024-11-13T18:37:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1449) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,369 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-13T18:37:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,369 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table591 2024-11-13T18:37:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,370 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table591) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,370 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1208 2024-11-13T18:37:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,370 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,370 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-13T18:37:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,371 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,371 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,371 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table570 2024-11-13T18:37:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,371 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,371 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table570) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,371 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-13T18:37:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,371 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,372 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,372 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table571 2024-11-13T18:37:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,372 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,372 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table571) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,372 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-13T18:37:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,373 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,373 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,373 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table330 2024-11-13T18:37:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,373 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,373 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,373 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table572 2024-11-13T18:37:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,374 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,374 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table572) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,374 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-13T18:37:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,374 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,374 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,374 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table331 2024-11-13T18:37:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,374 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,375 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,375 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table573 2024-11-13T18:37:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,375 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,375 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table573) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,375 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-13T18:37:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,375 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,375 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,375 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table332 2024-11-13T18:37:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,376 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,376 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,376 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table574 2024-11-13T18:37:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,376 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,376 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table574) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,376 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-13T18:37:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,377 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,377 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,377 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table333 2024-11-13T18:37:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,377 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,377 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,377 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table575 2024-11-13T18:37:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,378 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,378 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table575) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,378 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table334 2024-11-13T18:37:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,378 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,378 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,378 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table576 2024-11-13T18:37:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,378 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,378 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table576) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,378 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1430 2024-11-13T18:37:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,379 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,379 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,379 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table335 2024-11-13T18:37:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,380 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,380 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,380 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table577 2024-11-13T18:37:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,380 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,380 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table577) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,380 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1431 2024-11-13T18:37:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,380 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,380 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,380 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table336 2024-11-13T18:37:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,381 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,381 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,381 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table578 2024-11-13T18:37:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,381 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,381 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table578) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,381 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table337 2024-11-13T18:37:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,382 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,382 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,382 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table579 2024-11-13T18:37:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,382 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,382 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table579) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,382 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table338 2024-11-13T18:37:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,383 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,383 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table339 2024-11-13T18:37:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,383 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,383 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1432 2024-11-13T18:37:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,384 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,384 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1432) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,384 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1433 2024-11-13T18:37:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,384 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,384 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1433) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,384 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1434 2024-11-13T18:37:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,384 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,384 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1434) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,385 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1435 2024-11-13T18:37:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,385 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,385 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1435) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,385 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-13T18:37:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,385 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,385 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,385 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1436 2024-11-13T18:37:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,386 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,386 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1436) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,386 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-13T18:37:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,387 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,387 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,387 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1437 2024-11-13T18:37:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,387 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,387 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1437) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,387 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-13T18:37:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,388 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,388 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,388 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1438 2024-11-13T18:37:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,388 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,388 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1438) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,388 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-13T18:37:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,388 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,388 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,388 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table580 2024-11-13T18:37:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,389 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,389 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table580) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,389 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1439 2024-11-13T18:37:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,389 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,389 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1439) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,389 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table284 2024-11-13T18:37:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,390 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,390 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,390 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table285 2024-11-13T18:37:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,390 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,390 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,390 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table286 2024-11-13T18:37:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,391 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,391 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,391 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table287 2024-11-13T18:37:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,391 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,391 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,391 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table288 2024-11-13T18:37:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,392 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,392 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,392 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table289 2024-11-13T18:37:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,392 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,392 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,392 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table290 2024-11-13T18:37:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,393 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,393 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,393 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table291 2024-11-13T18:37:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,393 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,393 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,393 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table292 2024-11-13T18:37:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,394 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,394 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,394 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table293 2024-11-13T18:37:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,394 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,394 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,394 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table294 2024-11-13T18:37:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,395 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,395 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,395 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table273 2024-11-13T18:37:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,395 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,395 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,395 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table274 2024-11-13T18:37:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,396 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,396 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,396 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table275 2024-11-13T18:37:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,396 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,396 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,396 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table276 2024-11-13T18:37:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,397 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,397 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,397 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table277 2024-11-13T18:37:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,397 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,397 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,397 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table278 2024-11-13T18:37:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,398 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,398 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,398 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table279 2024-11-13T18:37:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,398 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,398 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,398 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table280 2024-11-13T18:37:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,399 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,399 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,399 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table281 2024-11-13T18:37:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,399 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,399 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,399 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table282 2024-11-13T18:37:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,400 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,400 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,400 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table283 2024-11-13T18:37:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,400 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,400 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,400 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table295 2024-11-13T18:37:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,401 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,401 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,401 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table296 2024-11-13T18:37:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,402 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,402 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,402 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table297 2024-11-13T18:37:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,402 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,402 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,402 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table298 2024-11-13T18:37:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,403 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,403 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,403 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table299 2024-11-13T18:37:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,403 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,403 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,403 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table920 2024-11-13T18:37:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,404 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,404 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table920) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,404 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table921 2024-11-13T18:37:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,404 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,404 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table921) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,404 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table922 2024-11-13T18:37:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table922) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,405 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table923 2024-11-13T18:37:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table923) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,405 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table924 2024-11-13T18:37:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table924) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,405 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table925 2024-11-13T18:37:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,406 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table925) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,406 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table926 2024-11-13T18:37:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,406 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,406 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table926) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,406 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table927 2024-11-13T18:37:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,406 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,406 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table927) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,406 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table928 2024-11-13T18:37:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,407 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,407 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table928) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,407 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table929 2024-11-13T18:37:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,407 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,407 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table929) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,407 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1094 2024-11-13T18:37:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,407 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,408 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1094) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,408 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1095 2024-11-13T18:37:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,408 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,408 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1095) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,408 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1096 2024-11-13T18:37:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,409 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,409 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1096) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,409 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1097 2024-11-13T18:37:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,409 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,409 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1097) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,409 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1098 2024-11-13T18:37:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,409 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,409 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1098) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,409 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1099 2024-11-13T18:37:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,410 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,410 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1099) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,410 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table910 2024-11-13T18:37:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,410 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,410 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table910) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,410 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table911 2024-11-13T18:37:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,410 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table911) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,411 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table912 2024-11-13T18:37:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,411 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table912) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,411 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1090 2024-11-13T18:37:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,411 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1090) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,411 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table913 2024-11-13T18:37:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,412 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,412 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table913) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,412 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1091 2024-11-13T18:37:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,412 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,412 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1091) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,412 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table914 2024-11-13T18:37:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,413 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table914) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,413 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1092 2024-11-13T18:37:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,413 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1092) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,413 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table915 2024-11-13T18:37:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,414 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,414 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table915) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,414 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1093 2024-11-13T18:37:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,414 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,414 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1093) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,414 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table916 2024-11-13T18:37:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,415 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,415 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table916) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,415 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table917 2024-11-13T18:37:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,415 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,415 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table917) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,415 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table918 2024-11-13T18:37:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,415 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,415 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table918) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,415 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table919 2024-11-13T18:37:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,416 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,416 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table919) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,416 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1083 2024-11-13T18:37:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,417 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,417 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1083) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,417 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1084 2024-11-13T18:37:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,417 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,417 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1084) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,417 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1085 2024-11-13T18:37:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,418 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,418 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1085) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,418 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1086 2024-11-13T18:37:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,418 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,418 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1086) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,418 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1087 2024-11-13T18:37:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,418 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,418 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1087) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,418 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1088 2024-11-13T18:37:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,419 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,419 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1088) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,419 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1089 2024-11-13T18:37:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,419 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,419 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1089) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,419 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table940 2024-11-13T18:37:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,419 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,419 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table940) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,420 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table941 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,420 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table941) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,420 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table700 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,420 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table700) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,420 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table942 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,420 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table942) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,420 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table701 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,421 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,421 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table701) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,421 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table943 2024-11-13T18:37:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,421 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,421 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table943) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,421 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table702 2024-11-13T18:37:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,421 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,421 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table702) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,421 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table944 2024-11-13T18:37:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table944) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table703 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table703) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table945 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table945) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table704 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table704) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table946 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,422 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table946) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1080 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1080) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table705 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table705) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table947 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table947) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1081 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,424 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,424 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1081) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,424 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table706 2024-11-13T18:37:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,424 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,424 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table706) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,424 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table948 2024-11-13T18:37:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,424 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,424 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table948) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,424 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1082 2024-11-13T18:37:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,425 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,425 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1082) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,425 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table707 2024-11-13T18:37:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,425 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,425 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table707) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,425 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table949 2024-11-13T18:37:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,426 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,426 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table949) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,426 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table708 2024-11-13T18:37:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,426 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,426 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table708) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,426 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table709 2024-11-13T18:37:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,427 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,427 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table709) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,427 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1072 2024-11-13T18:37:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,427 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,427 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1072) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,427 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1073 2024-11-13T18:37:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,428 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,428 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1073) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,428 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1074 2024-11-13T18:37:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,428 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,428 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1074) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,428 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1075 2024-11-13T18:37:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,429 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,429 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1075) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,429 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1076 2024-11-13T18:37:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,429 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,429 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1076) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,429 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1077 2024-11-13T18:37:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,429 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,429 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1077) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,429 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1078 2024-11-13T18:37:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,430 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,430 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1078) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,430 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1079 2024-11-13T18:37:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,430 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,430 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1079) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,430 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table930 2024-11-13T18:37:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,431 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,431 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table930) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,431 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table931 2024-11-13T18:37:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,431 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,431 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table931) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,431 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table932 2024-11-13T18:37:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,431 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,431 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table932) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,431 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table933 2024-11-13T18:37:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,432 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,432 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table933) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,432 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table934 2024-11-13T18:37:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,432 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,432 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table934) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,432 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table935 2024-11-13T18:37:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,433 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,433 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table935) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,433 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table936 2024-11-13T18:37:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,433 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,433 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table936) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
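The BalancerClusterState lines repeat the same topology for every table in this test: six region servers, each counted as its own host, all on the single rack "rack". A small sketch of that counting follows; the server names are copied from the log, but the index-assignment order is assumed, so the printed indices need not match the ones in the log exactly.

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative only: mirrors how the log arrives at "number of hosts=6, number of racks=1"
// by assigning an index to each distinct host and rack seen across the region servers.
public class ClusterStateSketch {
    public static void main(String[] args) {
        String[] servers = {"srv1217930446", "srv1272628633", "srv1331179858",
                            "srv1366310487", "srv380057362", "srv726429114"};
        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        Map<String, Integer> rackIndex = new LinkedHashMap<>();
        for (String server : servers) {
            hostIndex.putIfAbsent(server, hostIndex.size()); // one host per server in this test
            rackIndex.putIfAbsent("rack", rackIndex.size()); // single shared rack
        }
        System.out.printf("Number of tables=1, number of hosts=%d, number of racks=%d%n",
            hostIndex.size(), rackIndex.size());
    }
}
```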
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,433 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1070 2024-11-13T18:37:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,434 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1070) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,434 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table937 2024-11-13T18:37:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,434 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table937) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,434 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1071 2024-11-13T18:37:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1071) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,435 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table938 2024-11-13T18:37:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,435 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table938) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,435 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table939 2024-11-13T18:37:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,435 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table939) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,435 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table900 2024-11-13T18:37:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,436 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,436 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table900) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,436 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table901 2024-11-13T18:37:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,436 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,436 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table901) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,436 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table902 2024-11-13T18:37:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,437 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,437 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table902) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,437 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table903 2024-11-13T18:37:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,437 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,437 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table903) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,437 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table904 2024-11-13T18:37:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,437 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,437 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table904) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,438 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table905 2024-11-13T18:37:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,438 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,438 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table905) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,438 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table906 2024-11-13T18:37:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,438 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,438 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table906) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,438 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table907 2024-11-13T18:37:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,439 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,439 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table907) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,439 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table908 2024-11-13T18:37:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,439 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,439 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table908) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,439 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table909 2024-11-13T18:37:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1331179858=2, srv1217930446=0, srv1366310487=3, srv726429114=5, srv1272628633=1, srv380057362=4} racks are {rack=0} 2024-11-13T18:37:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,440 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,440 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table909) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,480 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1380 2024-11-13T18:37:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,480 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,480 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,481 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1381 2024-11-13T18:37:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,481 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,481 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,481 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table880 2024-11-13T18:37:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,481 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,481 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table880) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,481 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1140 2024-11-13T18:37:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,482 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,482 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,482 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1382 2024-11-13T18:37:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,482 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,482 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,482 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table881 2024-11-13T18:37:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,483 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,483 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table881) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,483 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1141 2024-11-13T18:37:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,483 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,483 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,483 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1383 2024-11-13T18:37:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,483 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,484 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,484 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table640 2024-11-13T18:37:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,484 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,484 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table640) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,484 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table882 2024-11-13T18:37:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,484 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,484 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table882) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,484 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1142 2024-11-13T18:37:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,485 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,485 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,485 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1384 2024-11-13T18:37:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,485 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,485 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,485 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table641 2024-11-13T18:37:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,486 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,486 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table641) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,486 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table883 2024-11-13T18:37:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,486 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,486 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table883) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,486 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1143 2024-11-13T18:37:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,487 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,487 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,487 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1385 2024-11-13T18:37:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,487 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,487 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,487 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table400 2024-11-13T18:37:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,488 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,488 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,488 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table642 2024-11-13T18:37:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,488 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,488 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table642) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,488 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table884 2024-11-13T18:37:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,489 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,489 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table884) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,489 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1144 2024-11-13T18:37:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,489 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,489 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,489 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1386 2024-11-13T18:37:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,489 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,489 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,490 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table401 2024-11-13T18:37:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,490 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,490 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,490 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table643 2024-11-13T18:37:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,490 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,490 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table643) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,490 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table885 2024-11-13T18:37:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,491 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,491 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table885) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,491 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1145 2024-11-13T18:37:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,491 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,491 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,491 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1387 2024-11-13T18:37:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,492 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,492 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,492 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table402 2024-11-13T18:37:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,492 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,492 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,492 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table644 2024-11-13T18:37:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,492 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,492 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table644) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,492 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table886 2024-11-13T18:37:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,493 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,493 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table886) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,493 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table403 2024-11-13T18:37:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,493 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,494 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,494 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table645 2024-11-13T18:37:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,494 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,494 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table645) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,494 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table887 2024-11-13T18:37:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,494 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,494 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table887) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,494 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table404 2024-11-13T18:37:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,495 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,495 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,495 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table646 2024-11-13T18:37:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,495 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,495 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table646) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,495 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table888 2024-11-13T18:37:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,496 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,496 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table888) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,496 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table405 2024-11-13T18:37:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,496 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,496 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,496 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table647 2024-11-13T18:37:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,497 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table647) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,497 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table889 2024-11-13T18:37:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,497 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table889) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,497 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table406 2024-11-13T18:37:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,497 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,497 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table648 2024-11-13T18:37:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,498 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,498 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table648) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,498 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table407 2024-11-13T18:37:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,498 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,498 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,498 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table649 2024-11-13T18:37:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,499 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,499 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table649) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,499 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table408 2024-11-13T18:37:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,499 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,499 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,499 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table409 2024-11-13T18:37:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,500 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,500 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,500 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1146 2024-11-13T18:37:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,501 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,501 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,501 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1388 2024-11-13T18:37:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,501 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,501 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,501 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1147 2024-11-13T18:37:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,501 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,501 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,501 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1389 2024-11-13T18:37:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,502 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,502 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,502 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1148 2024-11-13T18:37:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,502 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,502 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,502 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1149 2024-11-13T18:37:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,502 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,502 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,502 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1370 2024-11-13T18:37:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,503 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,503 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,503 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1371 2024-11-13T18:37:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,503 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,503 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,503 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table870 2024-11-13T18:37:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,504 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,504 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table870) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,504 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1130 2024-11-13T18:37:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,504 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,504 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,504 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1372 2024-11-13T18:37:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,504 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,504 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,504 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table871 2024-11-13T18:37:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,505 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,505 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table871) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,505 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1131 2024-11-13T18:37:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,505 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,505 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,505 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1373 2024-11-13T18:37:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,505 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,505 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,505 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table630 2024-11-13T18:37:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,506 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,506 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table630) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,506 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table872 2024-11-13T18:37:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,506 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,506 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table872) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,506 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1132 2024-11-13T18:37:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,507 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,507 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,507 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1374 2024-11-13T18:37:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,507 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,507 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,507 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table631 2024-11-13T18:37:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,508 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,508 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table631) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,508 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table873 2024-11-13T18:37:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,509 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,509 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table873) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,509 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1133 2024-11-13T18:37:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,509 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,509 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,509 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1375 2024-11-13T18:37:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,509 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,509 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,509 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table632 2024-11-13T18:37:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,510 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,510 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table632) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,510 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table874 2024-11-13T18:37:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,510 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,510 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table874) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,510 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1134 2024-11-13T18:37:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,511 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,511 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,511 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1376 2024-11-13T18:37:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,511 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,511 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,511 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table633 2024-11-13T18:37:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,511 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,511 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table633) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,511 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table875 2024-11-13T18:37:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,512 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,512 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table875) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,512 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table634 2024-11-13T18:37:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,513 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,513 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table634) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,513 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table876 2024-11-13T18:37:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,514 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,514 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table876) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,514 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table635 2024-11-13T18:37:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,514 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,514 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table635) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,514 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table877 2024-11-13T18:37:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,515 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,515 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table877) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table636 2024-11-13T18:37:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,515 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,515 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table636) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table878 2024-11-13T18:37:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,515 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,515 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table878) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table637 2024-11-13T18:37:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,520 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,520 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table637) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,520 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table879 2024-11-13T18:37:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,521 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,521 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table879) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,521 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table638 2024-11-13T18:37:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,521 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,521 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table638) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,521 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table639 2024-11-13T18:37:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,521 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,522 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table639) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,522 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1135 2024-11-13T18:37:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,522 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,522 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,522 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1377 2024-11-13T18:37:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,522 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,522 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,522 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1136 2024-11-13T18:37:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,523 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,523 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,523 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1378 2024-11-13T18:37:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,523 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,523 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,523 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1137 2024-11-13T18:37:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,523 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,523 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,524 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1379 2024-11-13T18:37:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,524 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,524 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,524 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1138 2024-11-13T18:37:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,525 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,525 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,525 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1139 2024-11-13T18:37:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,525 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,525 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,526 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table660 2024-11-13T18:37:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,526 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,526 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table660) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,526 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1360 2024-11-13T18:37:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,526 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,526 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,526 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table661 2024-11-13T18:37:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,527 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,527 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table661) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,527 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1361 2024-11-13T18:37:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,527 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,527 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,527 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table420 2024-11-13T18:37:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,528 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,528 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,528 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table662 2024-11-13T18:37:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,528 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,528 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table662) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,528 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1120 2024-11-13T18:37:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,528 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,528 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,528 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1362 2024-11-13T18:37:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,529 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,529 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,529 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table421 2024-11-13T18:37:59,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,529 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,529 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,529 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table663 2024-11-13T18:37:59,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,530 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,530 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table663) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,530 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1121 2024-11-13T18:37:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,530 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,530 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,530 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1363 2024-11-13T18:37:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,530 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,530 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,530 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table422 2024-11-13T18:37:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,531 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,531 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,531 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table664 2024-11-13T18:37:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,531 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,531 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table664) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,531 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1122 2024-11-13T18:37:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,532 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,532 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,532 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1364 2024-11-13T18:37:59,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,532 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,532 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,532 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table423 2024-11-13T18:37:59,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,533 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,533 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,533 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table665 2024-11-13T18:37:59,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,533 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,533 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table665) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,533 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1123 2024-11-13T18:37:59,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,533 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,533 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,533 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1365 2024-11-13T18:37:59,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,534 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,534 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,534 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table424 2024-11-13T18:37:59,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,534 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,534 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,534 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table666 2024-11-13T18:37:59,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,535 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,535 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table666) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,535 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table425 2024-11-13T18:37:59,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,535 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,535 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,535 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table667 2024-11-13T18:37:59,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,535 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,535 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table667) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,536 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table426 2024-11-13T18:37:59,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,536 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,536 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,536 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table668 2024-11-13T18:37:59,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,537 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,537 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table668) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,537 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table427 2024-11-13T18:37:59,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,537 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,537 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,537 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table669 2024-11-13T18:37:59,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,538 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,538 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table669) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,538 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table428 2024-11-13T18:37:59,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,538 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,538 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,538 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table429 2024-11-13T18:37:59,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,538 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,538 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,539 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1124 2024-11-13T18:37:59,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,539 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,539 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,539 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1366 2024-11-13T18:37:59,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,539 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,540 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,540 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1125 2024-11-13T18:37:59,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,540 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,540 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,540 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1367 2024-11-13T18:37:59,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,541 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,541 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,541 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1126 2024-11-13T18:37:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,541 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,541 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,541 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1368 2024-11-13T18:37:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,541 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,541 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,541 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1127 2024-11-13T18:37:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,542 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,542 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,542 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1369 2024-11-13T18:37:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,542 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,542 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,542 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1128 2024-11-13T18:37:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,543 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,543 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,543 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1129 2024-11-13T18:37:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,543 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,543 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,543 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table890 2024-11-13T18:37:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,544 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,544 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table890) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,544 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table891 2024-11-13T18:37:59,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,544 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,544 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table891) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,544 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table650 2024-11-13T18:37:59,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,545 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,545 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table650) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,545 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table892 2024-11-13T18:37:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,545 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,545 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table892) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,545 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1350 2024-11-13T18:37:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,545 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,545 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,545 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table651 2024-11-13T18:37:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,546 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,546 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table651) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,546 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table893 2024-11-13T18:37:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,546 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,546 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table893) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,546 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1351 2024-11-13T18:37:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,547 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,547 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,547 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table410 2024-11-13T18:37:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,547 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,547 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,547 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table652 2024-11-13T18:37:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,548 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,548 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table652) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,548 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table894 2024-11-13T18:37:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,548 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,548 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table894) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,548 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1110 2024-11-13T18:37:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,548 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,548 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,548 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1352 2024-11-13T18:37:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,549 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,549 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,549 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table411 2024-11-13T18:37:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,549 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,549 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,549 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table653 2024-11-13T18:37:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,550 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,550 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table653) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,550 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table895 2024-11-13T18:37:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,550 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,550 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table895) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,550 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1111 2024-11-13T18:37:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,551 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,551 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,551 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1353 2024-11-13T18:37:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,551 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,551 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,551 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table412 2024-11-13T18:37:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,551 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,551 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,551 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table654 2024-11-13T18:37:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,552 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,552 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table654) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,552 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table896 2024-11-13T18:37:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,552 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,552 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table896) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,552 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1112 2024-11-13T18:37:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,553 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,553 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,553 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1354 2024-11-13T18:37:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,553 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,553 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,553 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table413 2024-11-13T18:37:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,554 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,554 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,554 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table655 2024-11-13T18:37:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,554 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,554 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table655) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,554 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table897 2024-11-13T18:37:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,554 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,554 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table897) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,554 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table414 2024-11-13T18:37:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,555 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,555 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,555 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table656 2024-11-13T18:37:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,555 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,555 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table656) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,555 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table898 2024-11-13T18:37:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,556 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,556 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table898) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,556 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table415 2024-11-13T18:37:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,556 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,556 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,556 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table657 2024-11-13T18:37:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,557 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,557 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table657) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,557 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table899 2024-11-13T18:37:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,557 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,557 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table899) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,557 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table416 2024-11-13T18:37:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,557 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,557 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,558 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table658 2024-11-13T18:37:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,558 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,558 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table658) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,558 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table417 2024-11-13T18:37:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,558 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,558 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,558 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table659 2024-11-13T18:37:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,559 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,559 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table659) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,559 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table418 2024-11-13T18:37:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,559 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,559 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,560 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table419 2024-11-13T18:37:59,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,560 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,560 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,560 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1113 2024-11-13T18:37:59,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,560 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,560 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,560 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1355 2024-11-13T18:37:59,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,561 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,561 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,561 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1114 2024-11-13T18:37:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,561 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,561 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,561 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1356 2024-11-13T18:37:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,561 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,561 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,561 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1115 2024-11-13T18:37:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,562 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,562 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,562 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1357 2024-11-13T18:37:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,562 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,562 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,562 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1116 2024-11-13T18:37:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,563 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,563 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,563 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1358 2024-11-13T18:37:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,563 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,563 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,563 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1117 2024-11-13T18:37:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,563 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,563 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,563 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1359 2024-11-13T18:37:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,564 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,564 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,564 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1118 2024-11-13T18:37:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,564 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,564 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,564 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1119 2024-11-13T18:37:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,565 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,565 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,565 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1182 2024-11-13T18:37:59,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,565 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,565 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,565 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1183 2024-11-13T18:37:59,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,566 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,566 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,566 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1184 2024-11-13T18:37:59,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,566 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,566 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,566 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1185 2024-11-13T18:37:59,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,568 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,568 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,568 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1186 2024-11-13T18:37:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,568 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,568 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,568 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1187 2024-11-13T18:37:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,568 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,568 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,569 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table840 2024-11-13T18:37:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,569 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,569 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table840) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,569 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1188 2024-11-13T18:37:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,569 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,569 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,569 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table841 2024-11-13T18:37:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,570 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,570 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table841) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,570 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1189 2024-11-13T18:37:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,570 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,570 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,570 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table600 2024-11-13T18:37:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,570 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,570 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table600) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,570 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table842 2024-11-13T18:37:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,571 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,571 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table842) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,571 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table601 2024-11-13T18:37:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,571 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,571 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table601) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,571 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table843 2024-11-13T18:37:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,571 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,571 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table843) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,571 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table602 2024-11-13T18:37:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,572 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,572 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table602) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,572 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table844 2024-11-13T18:37:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,572 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,572 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table844) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,572 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table603 2024-11-13T18:37:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,573 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,573 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table603) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,573 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table845 2024-11-13T18:37:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,573 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,573 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table845) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,573 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table604 2024-11-13T18:37:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,573 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,574 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table604) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,574 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table846 2024-11-13T18:37:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,574 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,574 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table846) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,574 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table605 2024-11-13T18:37:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,574 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,574 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table605) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,574 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table847 2024-11-13T18:37:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,575 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,575 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table847) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,575 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table606 2024-11-13T18:37:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,575 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,575 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table606) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,575 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table848 2024-11-13T18:37:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,575 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,575 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table848) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,575 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1180 2024-11-13T18:37:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,576 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,576 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,576 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table607 2024-11-13T18:37:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,576 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,576 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table607) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,576 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table849 2024-11-13T18:37:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,577 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,577 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table849) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,577 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1181 2024-11-13T18:37:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,577 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,577 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,577 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table608 2024-11-13T18:37:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,577 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,577 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table608) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,577 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table609 2024-11-13T18:37:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,578 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,578 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table609) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,578 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1171 2024-11-13T18:37:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,578 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,578 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,578 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1172 2024-11-13T18:37:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,579 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,579 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,579 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1173 2024-11-13T18:37:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,579 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,579 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,579 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1174 2024-11-13T18:37:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,580 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,580 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,580 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1175 2024-11-13T18:37:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,580 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,580 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,580 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1176 2024-11-13T18:37:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,580 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,580 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,580 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1177 2024-11-13T18:37:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,581 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,581 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,581 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table830 2024-11-13T18:37:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,581 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,581 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table830) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,581 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1178 2024-11-13T18:37:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,581 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,581 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,581 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table831 2024-11-13T18:37:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,582 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,582 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table831) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,582 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table832 2024-11-13T18:37:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,582 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,582 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table832) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,582 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table833 2024-11-13T18:37:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,583 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,583 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table833) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,583 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table834 2024-11-13T18:37:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,583 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,583 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table834) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,583 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table835 2024-11-13T18:37:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,583 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,583 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table835) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,583 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table836 2024-11-13T18:37:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,584 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,584 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table836) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,584 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table837 2024-11-13T18:37:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,584 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,584 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table837) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,584 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table838 2024-11-13T18:37:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,585 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,585 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table838) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,585 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1170 2024-11-13T18:37:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,585 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,585 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,585 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table839 2024-11-13T18:37:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,586 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,586 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table839) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,586 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1179 2024-11-13T18:37:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,586 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,586 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,586 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1160 2024-11-13T18:37:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,586 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,586 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,586 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1161 2024-11-13T18:37:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,587 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,587 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,587 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1162 2024-11-13T18:37:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,587 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,587 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,587 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1163 2024-11-13T18:37:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,587 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,587 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,587 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table860 2024-11-13T18:37:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,596 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,596 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table860) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,596 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1164 2024-11-13T18:37:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,596 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,596 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,596 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table861 2024-11-13T18:37:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,597 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,597 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table861) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,597 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1165 2024-11-13T18:37:59,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,597 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,597 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,597 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table620 2024-11-13T18:37:59,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,598 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,598 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table620) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,598 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table862 2024-11-13T18:37:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,598 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,598 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table862) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,598 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1166 2024-11-13T18:37:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,598 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,598 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,598 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table621 2024-11-13T18:37:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,599 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,599 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table621) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,599 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table863 2024-11-13T18:37:59,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,600 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,600 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table863) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,600 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1167 2024-11-13T18:37:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,600 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,600 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,600 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table622 2024-11-13T18:37:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,601 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,601 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table622) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,601 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table864 2024-11-13T18:37:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,601 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,601 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table864) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,601 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table623 2024-11-13T18:37:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,601 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,601 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table623) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,601 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table865 2024-11-13T18:37:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,602 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,602 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table865) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,602 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table624 2024-11-13T18:37:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,602 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,602 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table624) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,602 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table866 2024-11-13T18:37:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,603 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,603 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table866) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,603 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table625 2024-11-13T18:37:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,603 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,603 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table625) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,603 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table867 2024-11-13T18:37:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,603 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,603 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table867) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,603 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table626 2024-11-13T18:37:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,604 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,604 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table626) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,604 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table868 2024-11-13T18:37:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,604 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,604 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table868) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,604 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table627 2024-11-13T18:37:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,605 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,605 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table627) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,605 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table869 2024-11-13T18:37:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,605 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,605 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table869) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,605 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table628 2024-11-13T18:37:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,605 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,605 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table628) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,605 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table629 2024-11-13T18:37:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,606 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,606 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table629) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,606 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1168 2024-11-13T18:37:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,606 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,606 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,606 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1169 2024-11-13T18:37:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,607 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,607 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,607 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1391 2024-11-13T18:37:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,607 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,607 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,607 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1150 2024-11-13T18:37:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,608 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,608 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,608 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1392 2024-11-13T18:37:59,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,610 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,610 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,610 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1151 2024-11-13T18:37:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,611 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,611 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,611 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1393 2024-11-13T18:37:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,611 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,611 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,611 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1152 2024-11-13T18:37:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,611 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,611 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,611 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1394 2024-11-13T18:37:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,612 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,612 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,612 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1153 2024-11-13T18:37:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,612 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,612 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,613 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1395 2024-11-13T18:37:59,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,614 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,614 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,614 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table850 2024-11-13T18:37:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,614 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,614 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table850) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,614 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1154 2024-11-13T18:37:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,615 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,615 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,615 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1396 2024-11-13T18:37:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,615 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,615 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,615 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table851 2024-11-13T18:37:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,615 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,616 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table851) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,616 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1155 2024-11-13T18:37:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,616 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,616 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,616 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1397 2024-11-13T18:37:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,616 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,616 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,616 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table610 2024-11-13T18:37:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,617 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,617 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table610) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,617 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table852 2024-11-13T18:37:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,617 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,617 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table852) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,617 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1156 2024-11-13T18:37:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,618 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,618 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,618 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1398 2024-11-13T18:37:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,618 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,618 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,618 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table611 2024-11-13T18:37:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,618 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,618 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table611) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,618 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table853 2024-11-13T18:37:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,619 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,619 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table853) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,619 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table612 2024-11-13T18:37:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,619 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,619 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table612) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,619 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table854 2024-11-13T18:37:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,620 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,620 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table854) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,620 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table613 2024-11-13T18:37:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,620 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,620 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table613) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,620 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table855 2024-11-13T18:37:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,620 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,621 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table855) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,621 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table614 2024-11-13T18:37:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,621 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,621 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table614) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,621 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table856 2024-11-13T18:37:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,621 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,621 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table856) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,621 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table615 2024-11-13T18:37:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,621 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,621 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table615) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,622 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table857 2024-11-13T18:37:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,622 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,622 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table857) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,622 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table616 2024-11-13T18:37:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,622 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,622 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table616) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,622 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table858 2024-11-13T18:37:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table858) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table617 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table617) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table859 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table859) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table618 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table618) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1390 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,624 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,624 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,624 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table619 2024-11-13T18:37:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,624 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,624 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table619) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,624 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1157 2024-11-13T18:37:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,624 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,624 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,624 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1399 2024-11-13T18:37:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1158 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1159 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table240 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,625 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table482 2024-11-13T18:37:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,626 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,626 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table482) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,626 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table241 2024-11-13T18:37:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,626 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,626 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,626 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table483 2024-11-13T18:37:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,627 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,627 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table483) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,627 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table242 2024-11-13T18:37:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,627 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,627 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,628 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table484 2024-11-13T18:37:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,628 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,628 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table484) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,628 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table243 2024-11-13T18:37:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,629 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table485 2024-11-13T18:37:59,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table485) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,629 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table244 2024-11-13T18:37:59,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,629 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table486 2024-11-13T18:37:59,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table486) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,629 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table245 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,630 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table487 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table487) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,630 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table246 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,630 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table488 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,631 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table488) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,631 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table247 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,631 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,631 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table489 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,631 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table489) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,631 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table248 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,631 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,631 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table249 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,632 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,632 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,632 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1308 2024-11-13T18:37:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,632 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,632 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,632 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1309 2024-11-13T18:37:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,633 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1300 2024-11-13T18:37:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,633 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1301 2024-11-13T18:37:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,634 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,634 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,634 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1302 2024-11-13T18:37:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,634 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,634 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,634 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1303 2024-11-13T18:37:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,634 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,634 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,634 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1304 2024-11-13T18:37:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,635 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,635 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table490 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,635 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table490) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,635 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1305 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,635 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,635 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table491 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,635 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table491) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,635 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1306 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,636 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,636 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table250 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,636 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,636 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table492 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,636 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table492) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,636 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1307 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,637 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,637 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,637 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table471 2024-11-13T18:37:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,637 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,637 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table471) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,637 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table230 2024-11-13T18:37:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,637 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,637 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,637 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table472 2024-11-13T18:37:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,638 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,638 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table472) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,638 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table231 2024-11-13T18:37:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,638 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,638 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,638 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table473 2024-11-13T18:37:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,639 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,639 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table473) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,639 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table232 2024-11-13T18:37:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,639 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,639 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,639 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table474 2024-11-13T18:37:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,639 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,639 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table474) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,639 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table233 2024-11-13T18:37:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,640 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,640 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,640 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table475 2024-11-13T18:37:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,640 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,640 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table475) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,640 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table234 2024-11-13T18:37:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,641 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,641 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,641 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table476 2024-11-13T18:37:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,641 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,641 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table476) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,641 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table235 2024-11-13T18:37:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table477 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table477) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table236 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table478 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,643 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table478) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,643 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table237 2024-11-13T18:37:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,643 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,643 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table479 2024-11-13T18:37:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,643 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table479) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,643 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table238 2024-11-13T18:37:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,644 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,644 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table239 2024-11-13T18:37:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,645 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table480 2024-11-13T18:37:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table480) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,645 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table481 2024-11-13T18:37:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,646 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,646 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table481) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,646 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table262 2024-11-13T18:37:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,646 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,646 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,646 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table263 2024-11-13T18:37:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,647 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,647 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,647 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table264 2024-11-13T18:37:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,647 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,647 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,647 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table265 2024-11-13T18:37:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,648 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,648 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,648 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table266 2024-11-13T18:37:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,648 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,648 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,648 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table267 2024-11-13T18:37:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,648 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table268 2024-11-13T18:37:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,649 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table269 2024-11-13T18:37:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,649 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table270 2024-11-13T18:37:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,650 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table271 2024-11-13T18:37:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,650 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table272 2024-11-13T18:37:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,651 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table251 2024-11-13T18:37:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,651 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table493 2024-11-13T18:37:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,652 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,652 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table493) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,652 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table252 2024-11-13T18:37:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,652 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,652 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,652 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table494 2024-11-13T18:37:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,653 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,653 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table494) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,653 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table253 2024-11-13T18:37:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,653 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,653 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,653 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table495 2024-11-13T18:37:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,654 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,654 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table495) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,654 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table254 2024-11-13T18:37:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,654 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,654 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,654 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table496 2024-11-13T18:37:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,655 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table496) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table255 2024-11-13T18:37:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,655 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table497 2024-11-13T18:37:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,656 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,656 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table497) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,656 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table256 2024-11-13T18:37:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,656 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,656 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,656 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table498 2024-11-13T18:37:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,657 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,657 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table498) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,657 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table257 2024-11-13T18:37:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,657 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,657 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,657 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table499 2024-11-13T18:37:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,658 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,658 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table499) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,658 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table258 2024-11-13T18:37:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,658 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,658 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,658 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table259 2024-11-13T18:37:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,659 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,659 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,659 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table260 2024-11-13T18:37:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,659 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,659 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,659 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table261 2024-11-13T18:37:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,660 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,660 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,660 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table680 2024-11-13T18:37:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,660 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,660 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table680) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,660 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table681 2024-11-13T18:37:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,660 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,660 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table681) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,660 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table440 2024-11-13T18:37:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table440) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table682 2024-11-13T18:37:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table682) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table441 2024-11-13T18:37:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table441) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table683 2024-11-13T18:37:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,662 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table683) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,662 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table200 2024-11-13T18:37:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,662 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,662 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table442 2024-11-13T18:37:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,663 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table442) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,663 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table684 2024-11-13T18:37:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,663 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table684) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,663 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1340 2024-11-13T18:37:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,663 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,663 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table201 2024-11-13T18:37:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,665 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table443 2024-11-13T18:37:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table443) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,665 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table685 2024-11-13T18:37:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table685) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,665 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1341 2024-11-13T18:37:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,666 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,666 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,666 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table202 2024-11-13T18:37:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,666 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,666 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,666 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table444 2024-11-13T18:37:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,668 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,668 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table444) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,670 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table686 2024-11-13T18:37:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,670 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,670 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table686) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,670 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1100 2024-11-13T18:37:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,671 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,671 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1342 2024-11-13T18:37:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,671 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,671 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table203 2024-11-13T18:37:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,672 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,672 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table445 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,672 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table445) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,672 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table687 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,672 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table687) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,672 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1101 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,672 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,672 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1343 2024-11-13T18:37:59,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,673 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,673 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table204 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,673 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,673 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table446 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,673 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table446) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,673 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table688 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,674 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,674 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table688) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,674 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table205 2024-11-13T18:37:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,674 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,674 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,674 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table447 2024-11-13T18:37:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,674 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,675 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table447) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table689 2024-11-13T18:37:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,675 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,675 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table689) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table206 2024-11-13T18:37:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,675 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,675 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table448 2024-11-13T18:37:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,675 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,675 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table448) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table207 2024-11-13T18:37:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,676 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,676 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,676 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table449 2024-11-13T18:37:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,676 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,676 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table449) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,676 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table208 2024-11-13T18:37:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table209 2024-11-13T18:37:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1102 2024-11-13T18:37:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,678 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,678 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1344 2024-11-13T18:37:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,678 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,678 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1103 2024-11-13T18:37:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,678 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,678 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1345 2024-11-13T18:37:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,679 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,679 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,679 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1104 2024-11-13T18:37:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,680 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,680 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,680 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1346 2024-11-13T18:37:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,680 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,680 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,680 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1105 2024-11-13T18:37:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,681 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,681 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,681 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1347 2024-11-13T18:37:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,681 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,681 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,681 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1106 2024-11-13T18:37:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,681 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,681 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,681 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1348 2024-11-13T18:37:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,682 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,682 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,682 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1107 2024-11-13T18:37:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,683 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,683 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,683 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1349 2024-11-13T18:37:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,683 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,683 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,683 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1108 2024-11-13T18:37:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,684 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,684 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,684 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table690 2024-11-13T18:37:59,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,685 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,685 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table690) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,685 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1109 2024-11-13T18:37:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,685 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,686 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table670 2024-11-13T18:37:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,686 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table670) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table671 2024-11-13T18:37:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,686 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table671) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table430 2024-11-13T18:37:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,687 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,687 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table672 2024-11-13T18:37:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,687 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,687 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table672) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table431 2024-11-13T18:37:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,688 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,688 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,688 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table673 2024-11-13T18:37:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,688 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,688 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table673) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,688 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table432 2024-11-13T18:37:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table432) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,689 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table674 2024-11-13T18:37:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table674) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1330 2024-11-13T18:37:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table433 2024-11-13T18:37:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table433) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table675 2024-11-13T18:37:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,691 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table675) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1331 2024-11-13T18:37:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,691 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table434 2024-11-13T18:37:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,691 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table434) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table676 2024-11-13T18:37:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,692 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table676) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,692 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1332 2024-11-13T18:37:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,692 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,692 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table435 2024-11-13T18:37:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,693 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,693 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table435) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table677 2024-11-13T18:37:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,693 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,693 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table677) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table436 2024-11-13T18:37:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,694 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,694 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table436) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,694 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table678 2024-11-13T18:37:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,694 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,694 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table678) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,694 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table437 2024-11-13T18:37:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,694 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,694 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table437) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,694 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table679 2024-11-13T18:37:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,694 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table679) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table438 2024-11-13T18:37:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,695 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table438) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table439 2024-11-13T18:37:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,695 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table439) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1333 2024-11-13T18:37:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,696 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,696 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,696 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1334 2024-11-13T18:37:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,696 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,696 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,696 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1335 2024-11-13T18:37:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1336 2024-11-13T18:37:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1337 2024-11-13T18:37:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1338 2024-11-13T18:37:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,698 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1339 2024-11-13T18:37:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,698 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table460 2024-11-13T18:37:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table460) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,698 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table461 2024-11-13T18:37:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,699 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table461) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table220 2024-11-13T18:37:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,699 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table462 2024-11-13T18:37:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,700 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,700 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table462) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,700 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table221 2024-11-13T18:37:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,700 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,700 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,700 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table463 2024-11-13T18:37:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,701 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,701 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table463) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,701 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table222 2024-11-13T18:37:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,701 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,701 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,701 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table464 2024-11-13T18:37:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,702 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,702 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table464) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,702 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table223 2024-11-13T18:37:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,702 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,702 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,703 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table465 2024-11-13T18:37:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,703 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,703 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table465) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,703 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table224 2024-11-13T18:37:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,703 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,703 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,704 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table466 2024-11-13T18:37:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,704 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,704 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table466) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,704 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1320 2024-11-13T18:37:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,704 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,704 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,704 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table225 2024-11-13T18:37:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,705 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,705 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,705 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table467 2024-11-13T18:37:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,705 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,705 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table467) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,705 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1321 2024-11-13T18:37:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,706 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,706 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,706 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table226 2024-11-13T18:37:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,706 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,706 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,706 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table468 2024-11-13T18:37:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table468) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table227 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table469 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table469) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table228 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table229 2024-11-13T18:37:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,708 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,708 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,708 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1322 2024-11-13T18:37:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,708 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,708 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,708 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1323 2024-11-13T18:37:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,708 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,708 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,708 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1324 2024-11-13T18:37:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1325 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1326 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1327 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,709 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1328 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,710 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table470 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table470) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,710 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1329 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,710 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table691 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,711 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,711 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table691) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table450 2024-11-13T18:37:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,711 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,711 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table450) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table692 2024-11-13T18:37:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,711 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table692) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table451 2024-11-13T18:37:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table451) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table693 2024-11-13T18:37:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table693) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table210 2024-11-13T18:37:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table452 2024-11-13T18:37:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,713 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,713 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table452) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,713 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table694 2024-11-13T18:37:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,713 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,713 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table694) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,713 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table211 2024-11-13T18:37:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,714 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table453 2024-11-13T18:37:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table453) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,714 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table695 2024-11-13T18:37:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table695) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,714 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table212 2024-11-13T18:37:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table454 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table454) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table696 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table696) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table213 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,715 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table455 2024-11-13T18:37:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,716 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,716 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table455) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,716 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table697 2024-11-13T18:37:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,716 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,716 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table697) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,716 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table214 2024-11-13T18:37:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,716 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,716 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,716 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table456 2024-11-13T18:37:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table456) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table698 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table698) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1310 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table215 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table457 2024-11-13T18:37:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table457) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table699 2024-11-13T18:37:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table699) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table216 2024-11-13T18:37:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,719 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table458 2024-11-13T18:37:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table458) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,719 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table217 2024-11-13T18:37:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,720 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,720 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,720 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table459 2024-11-13T18:37:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,720 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,720 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table459) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,720 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table218 2024-11-13T18:37:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,720 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,720 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,721 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table219 2024-11-13T18:37:59,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,721 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,721 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,721 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1319 2024-11-13T18:37:59,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,721 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,721 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,721 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1311 2024-11-13T18:37:59,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,722 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,722 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1312 2024-11-13T18:37:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,722 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,722 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1313 2024-11-13T18:37:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,722 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1314 2024-11-13T18:37:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,723 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1315 2024-11-13T18:37:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,723 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1316 2024-11-13T18:37:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,724 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,724 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1317 2024-11-13T18:37:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,724 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,724 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1318 2024-11-13T18:37:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,724 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,724 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table196 2024-11-13T18:37:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,725 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table197 2024-11-13T18:37:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,725 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table198 2024-11-13T18:37:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table199 2024-11-13T18:37:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table163 2024-11-13T18:37:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table164 2024-11-13T18:37:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table165 2024-11-13T18:37:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table166 2024-11-13T18:37:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,728 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,728 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,728 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table167 2024-11-13T18:37:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,728 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,728 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,728 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table168 2024-11-13T18:37:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,729 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,729 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table169 2024-11-13T18:37:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,729 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,729 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table170 2024-11-13T18:37:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,730 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,730 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,730 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table171 2024-11-13T18:37:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,730 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,730 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,730 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table172 2024-11-13T18:37:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,731 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,731 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,731 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table173 2024-11-13T18:37:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,731 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,731 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,731 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table152 2024-11-13T18:37:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,732 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,732 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,732 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table394 2024-11-13T18:37:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,732 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,732 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,732 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table153 2024-11-13T18:37:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,733 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,733 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,733 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table395 2024-11-13T18:37:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,733 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,733 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,733 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table154 2024-11-13T18:37:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,734 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,734 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,734 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table396 2024-11-13T18:37:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,734 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,734 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,734 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table155 2024-11-13T18:37:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,734 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,734 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,734 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table397 2024-11-13T18:37:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,735 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,735 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table156 2024-11-13T18:37:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,735 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,735 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table398 2024-11-13T18:37:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,736 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table157 2024-11-13T18:37:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,736 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table399 2024-11-13T18:37:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,737 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,737 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,737 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table158 2024-11-13T18:37:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,737 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,737 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,737 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table159 2024-11-13T18:37:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,737 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,737 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,737 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table160 2024-11-13T18:37:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,738 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,738 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,738 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table161 2024-11-13T18:37:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,738 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,738 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,738 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table162 2024-11-13T18:37:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,739 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,739 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,739 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table185 2024-11-13T18:37:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,739 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,739 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,739 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table186 2024-11-13T18:37:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,740 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table187 2024-11-13T18:37:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,740 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table188 2024-11-13T18:37:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,741 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,741 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table189 2024-11-13T18:37:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,741 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,741 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table190 2024-11-13T18:37:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,742 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,742 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table191 2024-11-13T18:37:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,742 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,742 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table192 2024-11-13T18:37:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,742 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,742 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table193 2024-11-13T18:37:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,742 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,742 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table194 2024-11-13T18:37:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table195 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table174 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table175 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,744 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,744 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,744 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table176 2024-11-13T18:37:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,744 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,744 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,744 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table177 2024-11-13T18:37:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table178 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table179 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table180 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table181 2024-11-13T18:37:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,746 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,746 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table182 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,746 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,746 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table183 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,746 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,746 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table184 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table800 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table800) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table801 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table801) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table802 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table802) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table803 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table803) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table804 2024-11-13T18:37:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table804) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table805 2024-11-13T18:37:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table805) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table806 2024-11-13T18:37:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table806) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table807 2024-11-13T18:37:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,749 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,749 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table807) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,749 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table808 2024-11-13T18:37:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,749 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,749 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table808) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,749 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table809 2024-11-13T18:37:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,749 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,749 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table809) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,749 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table820 2024-11-13T18:37:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table820) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table821 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table821) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table822 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table822) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table823 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table823) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table824 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table824) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table825 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table825) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table826 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table826) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table827 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table827) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table828 2024-11-13T18:37:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table828) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,752 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table829 2024-11-13T18:37:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table829) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,752 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1193 2024-11-13T18:37:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,752 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1194 2024-11-13T18:37:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,753 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,753 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,753 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1195 2024-11-13T18:37:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,753 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,753 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,753 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1196 2024-11-13T18:37:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,754 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,754 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,754 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1197 2024-11-13T18:37:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,754 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,754 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,754 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1198 2024-11-13T18:37:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,754 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,754 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,754 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1199 2024-11-13T18:37:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,755 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,755 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,755 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table810 2024-11-13T18:37:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,755 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,755 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table810) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,755 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table811 2024-11-13T18:37:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,756 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,756 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table811) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,756 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table812 2024-11-13T18:37:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,756 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,756 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table812) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,756 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table813 2024-11-13T18:37:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,757 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,757 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table813) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,757 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table814 2024-11-13T18:37:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,757 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,757 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table814) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,757 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1190 2024-11-13T18:37:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,757 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,758 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,758 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table815 2024-11-13T18:37:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,758 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,758 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table815) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,758 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1191 2024-11-13T18:37:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,758 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,758 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,758 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table816 2024-11-13T18:37:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,759 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,759 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table816) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,759 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1192 2024-11-13T18:37:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,759 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,759 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,759 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table817 2024-11-13T18:37:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,760 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,760 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table817) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,760 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table818 2024-11-13T18:37:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,760 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,760 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table818) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,760 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table819 2024-11-13T18:37:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,760 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,760 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table819) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1260 2024-11-13T18:37:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1261 2024-11-13T18:37:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table760 2024-11-13T18:37:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table760) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1020 2024-11-13T18:37:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1020) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1262 2024-11-13T18:37:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table761 2024-11-13T18:37:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table761) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1021 2024-11-13T18:37:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1021) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1263 2024-11-13T18:37:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,764 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,764 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table520 2024-11-13T18:37:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,764 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,764 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table520) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table762 2024-11-13T18:37:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table762) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,765 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1022 2024-11-13T18:37:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1022) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,765 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1264 2024-11-13T18:37:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,765 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table521 2024-11-13T18:37:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,766 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table521) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,766 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table763 2024-11-13T18:37:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,766 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table763) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,766 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1023 2024-11-13T18:37:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1023) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1265 2024-11-13T18:37:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table522 2024-11-13T18:37:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,768 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,768 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table522) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,768 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table764 2024-11-13T18:37:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,768 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,768 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table764) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,768 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1024 2024-11-13T18:37:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,768 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,768 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1024) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,768 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1266 2024-11-13T18:37:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,769 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,769 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,769 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table523 2024-11-13T18:37:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,769 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,769 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table523) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,769 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table765 2024-11-13T18:37:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,770 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,770 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table765) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,770 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table524 2024-11-13T18:37:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,770 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,770 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table524) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,770 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table766 2024-11-13T18:37:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table766) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table525 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table525) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table767 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table767) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table526 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table526) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table768 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,772 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,772 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table768) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,772 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table527 2024-11-13T18:37:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,772 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,772 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table527) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,772 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table769 2024-11-13T18:37:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,773 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table769) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,773 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table528 2024-11-13T18:37:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,773 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table528) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,773 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table529 2024-11-13T18:37:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,773 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table529) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,773 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-13T18:37:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-13T18:37:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-13T18:37:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-13T18:37:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1025 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1025) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1267 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1026 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1026) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1268 2024-11-13T18:37:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,776 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-13T18:37:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,776 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1027 2024-11-13T18:37:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1027) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1269 2024-11-13T18:37:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,777 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-13T18:37:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,778 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1028 2024-11-13T18:37:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1028) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,778 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-13T18:37:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,778 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1029 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,779 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1029) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,779 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,779 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1490 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1490) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1491 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1491) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table990 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table990) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1250 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1492 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1492) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table991 2024-11-13T18:37:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table991) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1251 2024-11-13T18:37:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1493 2024-11-13T18:37:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1493) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,782 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table750 2024-11-13T18:37:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table750) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,782 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table992 2024-11-13T18:37:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,783 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table992) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1010 2024-11-13T18:37:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,783 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1010) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1252 2024-11-13T18:37:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,784 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,784 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1494 2024-11-13T18:37:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,784 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1494) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,784 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table751 2024-11-13T18:37:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,784 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table751) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,784 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table993 2024-11-13T18:37:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,785 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,785 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table993) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,785 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1011 2024-11-13T18:37:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,785 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,785 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1011) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,785 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1253 2024-11-13T18:37:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,786 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,786 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,786 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1495 2024-11-13T18:37:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,786 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,786 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1495) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,786 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table510 2024-11-13T18:37:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,787 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,787 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table510) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,787 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table752 2024-11-13T18:37:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,787 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,787 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table752) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,787 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table994 2024-11-13T18:37:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,787 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,787 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table994) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,787 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1012 2024-11-13T18:37:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,788 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,789 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1012) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,789 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1254 2024-11-13T18:37:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,789 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,789 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,789 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1496 2024-11-13T18:37:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,790 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,790 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1496) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,790 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table511 2024-11-13T18:37:59,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,790 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,790 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table511) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,791 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table753 2024-11-13T18:37:59,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,791 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,791 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table753) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,791 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table995 2024-11-13T18:37:59,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,791 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,791 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table995) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,791 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1013 2024-11-13T18:37:59,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1013) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1255 2024-11-13T18:37:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1497 2024-11-13T18:37:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1497) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table512 2024-11-13T18:37:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table512) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table754 2024-11-13T18:37:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,794 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table754) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,794 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table996 2024-11-13T18:37:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,794 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table996) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,794 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table513 2024-11-13T18:37:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,795 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table513) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table755 2024-11-13T18:37:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,795 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table755) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table997 2024-11-13T18:37:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,796 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,796 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table997) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,796 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table514 2024-11-13T18:37:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,796 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,796 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table514) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,796 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table756 2024-11-13T18:37:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,797 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,797 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table756) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,797 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table998 2024-11-13T18:37:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,797 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,797 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table998) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,797 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table515 2024-11-13T18:37:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,798 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,798 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table515) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,798 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table757 2024-11-13T18:37:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,798 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,798 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table757) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,798 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table999 2024-11-13T18:37:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table999) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table516 2024-11-13T18:37:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table516) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table758 2024-11-13T18:37:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,800 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,800 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table758) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,800 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table517 2024-11-13T18:37:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,800 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,800 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table517) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,800 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table759 2024-11-13T18:37:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,801 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,801 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table759) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,801 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table518 2024-11-13T18:37:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,801 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,801 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table518) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,801 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table519 2024-11-13T18:37:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,801 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,801 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table519) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,801 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-13T18:37:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-13T18:37:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,803 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,803 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,803 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-13T18:37:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,803 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,803 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,803 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1014 2024-11-13T18:37:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1014) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1256 2024-11-13T18:37:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1498 2024-11-13T18:37:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,805 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,805 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1498) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,805 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-13T18:37:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,805 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1015 2024-11-13T18:37:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1015) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1257 2024-11-13T18:37:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1499 2024-11-13T18:37:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1499) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-13T18:37:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,808 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,808 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1016 2024-11-13T18:37:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,808 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1016) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,808 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1258 2024-11-13T18:37:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,809 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-13T18:37:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,809 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1017 2024-11-13T18:37:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1017) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1259 2024-11-13T18:37:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-13T18:37:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1018 2024-11-13T18:37:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1018) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-13T18:37:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,812 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,812 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1019 2024-11-13T18:37:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,812 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,812 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1019) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-13T18:37:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,813 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,813 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-13T18:37:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,814 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-13T18:37:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,814 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table780 2024-11-13T18:37:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table780) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1480 2024-11-13T18:37:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1480) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table781 2024-11-13T18:37:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,816 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,816 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table781) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,816 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1481 2024-11-13T18:37:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,816 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,816 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1481) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,816 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table540 2024-11-13T18:37:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,817 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,817 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table540) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,817 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table782 2024-11-13T18:37:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,817 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,817 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table782) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,817 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1240 2024-11-13T18:37:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1482 2024-11-13T18:37:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1482) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table541 2024-11-13T18:37:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,819 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,819 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table541) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,819 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table783 2024-11-13T18:37:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,819 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,819 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table783) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,819 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1241 2024-11-13T18:37:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1483 2024-11-13T18:37:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1483) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table300 2024-11-13T18:37:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,821 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table542 2024-11-13T18:37:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table542) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table784 2024-11-13T18:37:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table784) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1000 2024-11-13T18:37:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1000) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,823 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1242 2024-11-13T18:37:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,823 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1484 2024-11-13T18:37:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,824 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,824 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1484) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,824 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table301 2024-11-13T18:37:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,824 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,824 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,824 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table543 2024-11-13T18:37:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,825 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,825 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table543) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,825 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table785 2024-11-13T18:37:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,825 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,825 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table785) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,825 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1001 2024-11-13T18:37:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,825 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,825 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1001) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,825 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1243 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,826 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1485 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1485) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,826 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table302 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,826 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table544 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table544) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table786 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table786) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1002 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1002) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1244 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,828 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,828 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,828 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1486 2024-11-13T18:37:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,828 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,828 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1486) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,828 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table303 2024-11-13T18:37:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table545 2024-11-13T18:37:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table545) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table787 2024-11-13T18:37:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table787) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table304 2024-11-13T18:37:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table546 2024-11-13T18:37:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table546) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table788 2024-11-13T18:37:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table788) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table305 2024-11-13T18:37:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,833 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,833 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,833 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table547 2024-11-13T18:37:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,833 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,833 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table547) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,833 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table789 2024-11-13T18:37:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table789) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table306 2024-11-13T18:37:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table548 2024-11-13T18:37:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table548) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table307 2024-11-13T18:37:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table549 2024-11-13T18:37:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,836 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,836 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table549) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,836 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table308 2024-11-13T18:37:59,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,836 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,836 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,836 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table309 2024-11-13T18:37:59,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,837 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,837 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,837 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-13T18:37:59,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,838 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,838 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,838 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-13T18:37:59,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,838 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,838 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,838 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1003 2024-11-13T18:37:59,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1003) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,839 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1245 2024-11-13T18:37:59,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,839 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1487 2024-11-13T18:37:59,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1487) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-13T18:37:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1004 2024-11-13T18:37:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1004) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1246 2024-11-13T18:37:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1488 2024-11-13T18:37:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1488) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,842 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-13T18:37:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,842 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,842 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1005 2024-11-13T18:37:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1005) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1247 2024-11-13T18:37:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1489 2024-11-13T18:37:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1489) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-13T18:37:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1006 2024-11-13T18:37:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1006) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1248 2024-11-13T18:37:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-13T18:37:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1007 2024-11-13T18:37:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1007) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1249 2024-11-13T18:37:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-13T18:37:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1008 2024-11-13T18:37:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,848 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,848 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1008) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,848 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-13T18:37:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,848 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,848 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,848 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1009 2024-11-13T18:37:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,849 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,849 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1009) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-13T18:37:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,849 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,849 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-13T18:37:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,850 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,850 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,850 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-13T18:37:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,850 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,850 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,850 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table770 2024-11-13T18:37:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table770) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1470 2024-11-13T18:37:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1470) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table771 2024-11-13T18:37:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table771) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1471 2024-11-13T18:37:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1471) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table530 2024-11-13T18:37:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,853 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table530) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,853 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table772 2024-11-13T18:37:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table772) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,854 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1230 2024-11-13T18:37:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,854 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1472 2024-11-13T18:37:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,855 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1472) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table531 2024-11-13T18:37:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,855 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table531) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table773 2024-11-13T18:37:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table773) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1231 2024-11-13T18:37:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1473 2024-11-13T18:37:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1473) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table532 2024-11-13T18:37:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table532) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table774 2024-11-13T18:37:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table774) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1232 2024-11-13T18:37:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1474 2024-11-13T18:37:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1474) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table533 2024-11-13T18:37:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table533) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table775 2024-11-13T18:37:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table775) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1233 2024-11-13T18:37:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,860 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,860 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,860 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1475 2024-11-13T18:37:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,860 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,860 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1475) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,860 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table534 2024-11-13T18:37:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table534) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table776 2024-11-13T18:37:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table776) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table535 2024-11-13T18:37:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table535) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table777 2024-11-13T18:37:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table777) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table536 2024-11-13T18:37:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table536) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table778 2024-11-13T18:37:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table778) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table537 2024-11-13T18:37:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table537) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table779 2024-11-13T18:37:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,864 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table779) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,864 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table538 2024-11-13T18:37:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,864 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table538) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,864 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table539 2024-11-13T18:37:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,864 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table539) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,864 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-13T18:37:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,865 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1234 2024-11-13T18:37:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,865 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1476 2024-11-13T18:37:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,865 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1476) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-13T18:37:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1235 2024-11-13T18:37:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1477 2024-11-13T18:37:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1477) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-13T18:37:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1236 2024-11-13T18:37:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1478 2024-11-13T18:37:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1478) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-13T18:37:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1237 2024-11-13T18:37:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1479 2024-11-13T18:37:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1479) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-13T18:37:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1238 2024-11-13T18:37:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-13T18:37:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1239 2024-11-13T18:37:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-13T18:37:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-13T18:37:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1061 2024-11-13T18:37:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1061) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1062 2024-11-13T18:37:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,872 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1062) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1063 2024-11-13T18:37:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,872 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1063) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1064 2024-11-13T18:37:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1064) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1065 2024-11-13T18:37:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1065) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table960 2024-11-13T18:37:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table960) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1066 2024-11-13T18:37:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1066) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table961 2024-11-13T18:37:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table961) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1067 2024-11-13T18:37:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1067) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table720 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table720) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table962 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table962) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1068 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1068) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table721 2024-11-13T18:37:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table721) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table963 2024-11-13T18:37:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table963) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table722 2024-11-13T18:37:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,877 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,877 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table722) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table964 2024-11-13T18:37:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,877 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,877 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table964) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table723 2024-11-13T18:37:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table723) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table965 2024-11-13T18:37:59,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table965) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table724 2024-11-13T18:37:59,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table724) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table966 2024-11-13T18:37:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table966) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table725 2024-11-13T18:37:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table725) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table967 2024-11-13T18:37:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table967) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table726 2024-11-13T18:37:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table726) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table968 2024-11-13T18:37:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table968) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table727 2024-11-13T18:37:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table727) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table969 2024-11-13T18:37:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table969) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table728 2024-11-13T18:37:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table728) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1060 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1060) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table729 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table729) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1069 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1069) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1050 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1050) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1292 2024-11-13T18:37:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1051 2024-11-13T18:37:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1051) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1293 2024-11-13T18:37:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1052 2024-11-13T18:37:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1052) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
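Note: each plan above also prints the BalancerClusterState mapping ("Hosts are {srvNNN=index} racks are {rack=0}", then "server i is on host i" and "server i is on rack 0"). Below is a small, hedged sketch of that index mapping for this six-server, single-rack cluster; the structure is inferred from the log lines, not taken from the BalancerClusterState source.

```java
// Sketch only: the host/rack index mapping described by the BalancerClusterState
// log records above. One rack, six hosts, one server per host.
import java.util.LinkedHashMap;
import java.util.Map;

public class ClusterStateSketch {
    public static void main(String[] args) {
        // Host name -> host index, as printed in "Hosts are {...}".
        Map<String, Integer> hosts = new LinkedHashMap<>();
        hosts.put("srv1598014012", 0);
        hosts.put("srv1900528152", 1);
        hosts.put("srv225233530", 2);
        hosts.put("srv348367074", 3);
        hosts.put("srv7611099", 4);
        hosts.put("srv84863110", 5);

        // With a single rack ("racks are {rack=0}") every server is on rack 0,
        // and server index i corresponds 1:1 to host index i.
        int rack = 0;
        hosts.forEach((name, hostIndex) ->
            System.out.println("server " + hostIndex + " (" + name + ") is on host "
                + hostIndex + " and rack " + rack));
        System.out.println("Number of tables=1, number of hosts=" + hosts.size()
            + ", number of racks=1");
    }
}
```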
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1294 2024-11-13T18:37:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,885 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1053 2024-11-13T18:37:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,885 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1053) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1295 2024-11-13T18:37:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1054 2024-11-13T18:37:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1054) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1296 2024-11-13T18:37:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1055 2024-11-13T18:37:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1055) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1297 2024-11-13T18:37:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table950 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table950) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1056 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1056) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1298 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table951 2024-11-13T18:37:59,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table951) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1057 2024-11-13T18:37:59,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1057) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1299 2024-11-13T18:37:59,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table710 2024-11-13T18:37:59,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,890 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,890 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table710) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,890 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table952 2024-11-13T18:37:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,890 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,890 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table952) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,890 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table711 2024-11-13T18:37:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table711) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table953 2024-11-13T18:37:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table953) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table712 2024-11-13T18:37:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table712) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table954 2024-11-13T18:37:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,892 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,892 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table954) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,892 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table713 2024-11-13T18:37:59,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,892 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,892 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table713) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,892 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table955 2024-11-13T18:37:59,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,893 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,893 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table955) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,893 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table714 2024-11-13T18:37:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,893 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,893 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table714) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,893 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table956 2024-11-13T18:37:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table956) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table715 2024-11-13T18:37:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table715) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table957 2024-11-13T18:37:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table957) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table716 2024-11-13T18:37:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table716) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table958 2024-11-13T18:37:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table958) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1290 2024-11-13T18:37:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table717 2024-11-13T18:37:59,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table717) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table959 2024-11-13T18:37:59,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table959) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1291 2024-11-13T18:37:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table718 2024-11-13T18:37:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table718) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table719 2024-11-13T18:37:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table719) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1058 2024-11-13T18:37:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1058) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1059 2024-11-13T18:37:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1059) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1281 2024-11-13T18:37:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1040 2024-11-13T18:37:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1040) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1282 2024-11-13T18:37:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1041 2024-11-13T18:37:59,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1041) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1283 2024-11-13T18:37:59,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table980 2024-11-13T18:37:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table980) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1042 2024-11-13T18:37:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1042) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1284 2024-11-13T18:37:59,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,902 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,902 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,902 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table981 2024-11-13T18:37:59,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,903 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table981) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,903 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1043 2024-11-13T18:37:59,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,903 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1043) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,903 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1285 2024-11-13T18:37:59,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,903 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,903 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table740 2024-11-13T18:37:59,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,904 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,904 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table740) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,904 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table982 2024-11-13T18:37:59,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,904 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,904 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table982) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,904 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1044 2024-11-13T18:37:59,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,905 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,905 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1044) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,905 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1286 2024-11-13T18:37:59,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,905 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,905 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,905 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table741 2024-11-13T18:37:59,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,905 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,905 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table741) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,905 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table983 2024-11-13T18:37:59,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,906 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,906 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table983) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,906 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1045 2024-11-13T18:37:59,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,906 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,906 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1045) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,906 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1287 2024-11-13T18:37:59,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,907 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,907 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,907 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table500 2024-11-13T18:37:59,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,907 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,907 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table500) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,907 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table742 2024-11-13T18:37:59,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,908 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,908 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table742) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,908 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table984 2024-11-13T18:37:59,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,908 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,908 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table984) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,908 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1046 2024-11-13T18:37:59,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,909 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,909 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1046) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,909 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1288 2024-11-13T18:37:59,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,911 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,911 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,911 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table501 2024-11-13T18:37:59,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,911 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,911 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table501) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,911 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table743 2024-11-13T18:37:59,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,912 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table743) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table985 2024-11-13T18:37:59,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,912 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table985) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table502 2024-11-13T18:37:59,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,912 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table502) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table744 2024-11-13T18:37:59,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,913 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,913 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table744) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,913 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table986 2024-11-13T18:37:59,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,913 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,913 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table986) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,913 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table503 2024-11-13T18:37:59,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,913 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,913 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table503) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,913 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table745 2024-11-13T18:37:59,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,914 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,914 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table745) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,914 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table987 2024-11-13T18:37:59,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,914 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,914 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table987) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,914 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table504 2024-11-13T18:37:59,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,914 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,914 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table504) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,914 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table746 2024-11-13T18:37:59,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,915 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table746) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table988 2024-11-13T18:37:59,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,915 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table988) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table505 2024-11-13T18:37:59,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,915 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table505) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table747 2024-11-13T18:37:59,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,916 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,916 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table747) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,916 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table989 2024-11-13T18:37:59,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,916 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,916 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table989) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,916 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table506 2024-11-13T18:37:59,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,916 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table506) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table748 2024-11-13T18:37:59,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,917 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table748) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table507 2024-11-13T18:37:59,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,917 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table507) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table749 2024-11-13T18:37:59,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,918 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,918 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table749) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,918 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table508 2024-11-13T18:37:59,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,918 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,918 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table508) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,918 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1280 2024-11-13T18:37:59,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,918 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,918 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,918 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table509 2024-11-13T18:37:59,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,919 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,919 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table509) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,919 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1047 2024-11-13T18:37:59,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,919 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,919 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1047) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,919 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1289 2024-11-13T18:37:59,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,920 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,920 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,920 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1048 2024-11-13T18:37:59,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,920 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,920 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1048) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,920 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1049 2024-11-13T18:37:59,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,921 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,921 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1049) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,921 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1270 2024-11-13T18:37:59,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,921 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,921 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,921 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1271 2024-11-13T18:37:59,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,921 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,922 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1030 2024-11-13T18:37:59,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1030) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,922 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1272 2024-11-13T18:37:59,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,922 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1031 2024-11-13T18:37:59,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,923 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,923 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1031) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,923 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1273 2024-11-13T18:37:59,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,923 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,923 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,923 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table970 2024-11-13T18:37:59,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,924 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,924 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table970) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1032 2024-11-13T18:37:59,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,924 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,924 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1032) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1274 2024-11-13T18:37:59,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table971 2024-11-13T18:37:59,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table971) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1033 2024-11-13T18:37:59,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1033) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1275 2024-11-13T18:37:59,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,926 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,926 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,926 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table730 2024-11-13T18:37:59,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,926 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,926 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table730) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,926 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table972 2024-11-13T18:37:59,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table972) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,927 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1034 2024-11-13T18:37:59,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1034) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,927 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1276 2024-11-13T18:37:59,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,927 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table731 2024-11-13T18:37:59,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,928 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table731) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,928 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table973 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,928 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table973) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,928 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1035 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,928 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1035) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,928 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1277 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,929 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,929 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,929 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table732 2024-11-13T18:37:59,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,929 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,929 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table732) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,929 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table974 2024-11-13T18:37:59,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,930 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table974) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table733 2024-11-13T18:37:59,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,930 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table733) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table975 2024-11-13T18:37:59,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,930 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table975) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table734 2024-11-13T18:37:59,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table734) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table976 2024-11-13T18:37:59,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table976) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table735 2024-11-13T18:37:59,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,932 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,932 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table735) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,932 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table977 2024-11-13T18:37:59,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,932 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,932 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table977) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,932 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table736 2024-11-13T18:37:59,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,935 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table736) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,935 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table978 2024-11-13T18:37:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table978) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table737 2024-11-13T18:37:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table737) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table979 2024-11-13T18:37:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table979) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table738 2024-11-13T18:37:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table738) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table739 2024-11-13T18:37:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table739) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1036 2024-11-13T18:37:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1036) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1278 2024-11-13T18:37:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1037 2024-11-13T18:37:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1037) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1279 2024-11-13T18:37:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1038 2024-11-13T18:37:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1038) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1039 2024-11-13T18:37:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1039) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-13T18:37:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,940 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table361 2024-11-13T18:37:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,940 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-13T18:37:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,941 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-13T18:37:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,941 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table362 2024-11-13T18:37:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,942 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,942 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-13T18:37:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-13T18:37:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table363 2024-11-13T18:37:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-13T18:37:59,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,944 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-13T18:37:59,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,944 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table364 2024-11-13T18:37:59,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,945 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-13T18:37:59,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-13T18:37:59,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table365 2024-11-13T18:37:59,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-13T18:37:59,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-13T18:37:59,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table366 2024-11-13T18:37:59,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-13T18:37:59,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-13T18:37:59,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table367 2024-11-13T18:37:59,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-13T18:37:59,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table368 2024-11-13T18:37:59,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1420 2024-11-13T18:37:59,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-13T18:37:59,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table369 2024-11-13T18:37:59,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-13T18:37:59,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,953 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-13T18:37:59,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,954 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,954 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1429 2024-11-13T18:37:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,954 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,954 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1421 2024-11-13T18:37:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1422 2024-11-13T18:37:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1423 2024-11-13T18:37:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,956 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1424 2024-11-13T18:37:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,956 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1425 2024-11-13T18:37:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-13T18:37:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1426 2024-11-13T18:37:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-13T18:37:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table370 2024-11-13T18:37:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1427 2024-11-13T18:37:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-13T18:37:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table371 2024-11-13T18:37:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1428 2024-11-13T18:37:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,961 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,961 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table350 2024-11-13T18:37:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table592 2024-11-13T18:37:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table592) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table351 2024-11-13T18:37:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,963 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table593 2024-11-13T18:37:59,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table593) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,963 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-13T18:37:59,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table352 2024-11-13T18:37:59,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table594 2024-11-13T18:37:59,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,965 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table594) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-13T18:37:59,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,965 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,965 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table353 2024-11-13T18:37:59,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table595 2024-11-13T18:37:59,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table595) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-13T18:37:59,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table354 2024-11-13T18:37:59,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table596 2024-11-13T18:37:59,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table596) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-13T18:37:59,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table355 2024-11-13T18:37:59,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,969 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table597 2024-11-13T18:37:59,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table597) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,970 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-13T18:37:59,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,970 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table356 2024-11-13T18:37:59,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table598 2024-11-13T18:37:59,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table598) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-13T18:37:59,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,972 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,972 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table357 2024-11-13T18:37:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,972 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,972 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table599 2024-11-13T18:37:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table599) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-13T18:37:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table358 2024-11-13T18:37:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,974 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,974 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,974 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-13T18:37:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,975 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table359 2024-11-13T18:37:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,975 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-13T18:37:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,976 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-13T18:37:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1418 2024-11-13T18:37:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1419 2024-11-13T18:37:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,978 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,978 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1410 2024-11-13T18:37:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1411 2024-11-13T18:37:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1412 2024-11-13T18:37:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,980 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,980 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1413 2024-11-13T18:37:59,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,980 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,980 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1414 2024-11-13T18:37:59,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,980 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,980 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1415 2024-11-13T18:37:59,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,981 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,981 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,981 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1416 2024-11-13T18:37:59,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,981 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,982 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,982 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table360 2024-11-13T18:37:59,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,982 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,982 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,982 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1417 2024-11-13T18:37:59,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table141 2024-11-13T18:37:59,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table383 2024-11-13T18:37:59,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table142 2024-11-13T18:37:59,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table384 2024-11-13T18:37:59,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table143 2024-11-13T18:37:59,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table385 2024-11-13T18:37:59,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,986 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,986 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table144 2024-11-13T18:37:59,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table386 2024-11-13T18:37:59,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table145 2024-11-13T18:37:59,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table387 2024-11-13T18:37:59,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,989 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,989 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,989 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table146 2024-11-13T18:37:59,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,989 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table388 2024-11-13T18:37:59,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table147 2024-11-13T18:37:59,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,991 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,991 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table389 2024-11-13T18:37:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,991 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,991 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table148 2024-11-13T18:37:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,992 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,992 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,992 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table149 2024-11-13T18:37:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,992 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:37:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1407 2024-11-13T18:37:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,994 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,994 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:37:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,994 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,994 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1408 2024-11-13T18:37:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:37:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1409 2024-11-13T18:37:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,996 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,997 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,997 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:37:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,998 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,998 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,998 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:37:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:37:59,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:37:59,999 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:37:59,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:37:59,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:37:59,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:37:59,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:37:59,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:37:59,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:37:59,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:37:59,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:37:59,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:37:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:37:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:37:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:37:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:37:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:37:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1400 2024-11-13T18:38:00,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1401 2024-11-13T18:38:00,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1402 2024-11-13T18:38:00,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-13T18:38:00,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table390 2024-11-13T18:38:00,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1403 2024-11-13T18:38:00,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-13T18:38:00,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table391 2024-11-13T18:38:00,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1404 2024-11-13T18:38:00,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-13T18:38:00,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table150 2024-11-13T18:38:00,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table392 2024-11-13T18:38:00,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1405 2024-11-13T18:38:00,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-13T18:38:00,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,008 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,008 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table151 2024-11-13T18:38:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,009 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,009 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,009 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table393 2024-11-13T18:38:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,009 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,009 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,009 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1406 2024-11-13T18:38:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,010 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,010 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table130 2024-11-13T18:38:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,010 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,010 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table372 2024-11-13T18:38:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table131 2024-11-13T18:38:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table373 2024-11-13T18:38:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,012 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,012 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,012 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table132 2024-11-13T18:38:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,013 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,013 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,013 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table374 2024-11-13T18:38:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,014 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,014 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,014 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table133 2024-11-13T18:38:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,014 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,014 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,014 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table375 2024-11-13T18:38:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table134 2024-11-13T18:38:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table376 2024-11-13T18:38:00,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,018 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,018 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,018 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table135 2024-11-13T18:38:00,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,019 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,019 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,019 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table377 2024-11-13T18:38:00,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table136 2024-11-13T18:38:00,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,021 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,021 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,021 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table378 2024-11-13T18:38:00,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table137 2024-11-13T18:38:00,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table379 2024-11-13T18:38:00,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table138 2024-11-13T18:38:00,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table139 2024-11-13T18:38:00,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table380 2024-11-13T18:38:00,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,025 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,025 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table381 2024-11-13T18:38:00,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,025 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,025 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table140 2024-11-13T18:38:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,026 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,026 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,026 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table382 2024-11-13T18:38:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,026 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-13T18:38:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-13T18:38:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,028 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,028 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,028 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table560 2024-11-13T18:38:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,028 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,028 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table560) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,028 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-13T18:38:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table561 2024-11-13T18:38:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,030 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,030 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table561) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table320 2024-11-13T18:38:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,030 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,030 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table562 2024-11-13T18:38:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table562) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1460 2024-11-13T18:38:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1460) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table321 2024-11-13T18:38:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,032 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table563 2024-11-13T18:38:00,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,035 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,035 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table563) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,035 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1461 2024-11-13T18:38:00,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,035 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,035 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1461) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,035 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table322 2024-11-13T18:38:00,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,036 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,036 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,036 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table564 2024-11-13T18:38:00,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,036 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,036 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table564) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,036 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1220 2024-11-13T18:38:00,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,037 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,037 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,037 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1462 2024-11-13T18:38:00,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,037 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,037 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1462) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,037 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table323 2024-11-13T18:38:00,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,038 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,038 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table565 2024-11-13T18:38:00,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,038 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,038 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table565) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1221 2024-11-13T18:38:00,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1463 2024-11-13T18:38:00,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1463) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table324 2024-11-13T18:38:00,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,040 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,040 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,040 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table566 2024-11-13T18:38:00,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,040 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,040 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table566) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,040 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1222 2024-11-13T18:38:00,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1464 2024-11-13T18:38:00,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1464) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table325 2024-11-13T18:38:00,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,042 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table567 2024-11-13T18:38:00,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table567) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,042 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table326 2024-11-13T18:38:00,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,043 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,043 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table568 2024-11-13T18:38:00,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,043 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table568) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,043 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table327 2024-11-13T18:38:00,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,044 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table569 2024-11-13T18:38:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table569) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,045 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table328 2024-11-13T18:38:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,045 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,045 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,045 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table329 2024-11-13T18:38:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,046 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,046 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,046 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1223 2024-11-13T18:38:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,046 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,046 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,046 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1465 2024-11-13T18:38:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,046 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,047 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1465) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,047 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-13T18:38:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,047 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,047 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,047 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1224 2024-11-13T18:38:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,048 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,048 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,048 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1466 2024-11-13T18:38:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,048 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,048 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1466) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,048 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-13T18:38:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1225 2024-11-13T18:38:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1467 2024-11-13T18:38:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1467) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,050 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-13T18:38:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,050 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1226 2024-11-13T18:38:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,051 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,051 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,051 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1468 2024-11-13T18:38:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,052 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,052 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1468) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,052 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-13T18:38:00,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,052 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,052 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,053 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1227 2024-11-13T18:38:00,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,053 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,053 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,053 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1469 2024-11-13T18:38:00,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,053 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,053 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1469) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,053 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-13T18:38:00,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,054 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,054 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,054 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1228 2024-11-13T18:38:00,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,054 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,054 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,055 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-13T18:38:00,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,055 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,055 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,055 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1229 2024-11-13T18:38:00,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,056 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,056 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,056 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-13T18:38:00,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,057 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,057 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,057 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-13T18:38:00,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,057 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,057 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,057 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table790 2024-11-13T18:38:00,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,058 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,058 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table790) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,058 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-13T18:38:00,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,058 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,058 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,058 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table791 2024-11-13T18:38:00,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,059 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table791) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,059 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-13T18:38:00,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,059 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,059 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table550 2024-11-13T18:38:00,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,060 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,060 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table550) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,060 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table792 2024-11-13T18:38:00,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,060 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,060 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table792) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,061 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-13T18:38:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,062 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,062 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,062 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table551 2024-11-13T18:38:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,062 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,062 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table551) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,062 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table793 2024-11-13T18:38:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,063 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table793) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,063 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table310 2024-11-13T18:38:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,063 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,063 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table552 2024-11-13T18:38:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,064 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,064 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table552) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,064 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table794 2024-11-13T18:38:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,064 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,064 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table794) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,064 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1450 2024-11-13T18:38:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,065 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,065 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1450) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,065 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table311 2024-11-13T18:38:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,065 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,065 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table553 2024-11-13T18:38:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,066 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,066 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table553) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table795 2024-11-13T18:38:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table795) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1451 2024-11-13T18:38:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1451) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table312 2024-11-13T18:38:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,069 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,070 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,070 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table554 2024-11-13T18:38:00,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,070 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,070 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table554) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,070 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table796 2024-11-13T18:38:00,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,071 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table796) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,071 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1210 2024-11-13T18:38:00,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,071 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,071 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1452 2024-11-13T18:38:00,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,072 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,072 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1452) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,072 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table313 2024-11-13T18:38:00,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,073 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table555 2024-11-13T18:38:00,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table555) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table797 2024-11-13T18:38:00,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,074 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table797) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1211 2024-11-13T18:38:00,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1453 2024-11-13T18:38:00,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1453) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table314 2024-11-13T18:38:00,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,076 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table556 2024-11-13T18:38:00,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table556) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table798 2024-11-13T18:38:00,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,089 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table798) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,089 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table315 2024-11-13T18:38:00,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,090 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,090 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,090 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table557 2024-11-13T18:38:00,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,090 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,090 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table557) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,090 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table799 2024-11-13T18:38:00,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table799) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table316 2024-11-13T18:38:00,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table558 2024-11-13T18:38:00,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,092 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table558) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,092 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table317 2024-11-13T18:38:00,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,092 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table559 2024-11-13T18:38:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table559) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table318 2024-11-13T18:38:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,094 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table319 2024-11-13T18:38:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,094 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,094 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1212 2024-11-13T18:38:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1454 2024-11-13T18:38:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1454) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1213 2024-11-13T18:38:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,096 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1455 2024-11-13T18:38:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1455) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,096 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-13T18:38:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1214 2024-11-13T18:38:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1456 2024-11-13T18:38:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,098 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,098 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1456) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,098 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-13T18:38:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,098 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,099 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1215 2024-11-13T18:38:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,099 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1457 2024-11-13T18:38:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1457) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-13T18:38:00,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,102 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,102 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1216 2024-11-13T18:38:00,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,103 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1458 2024-11-13T18:38:00,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,103 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1458) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-13T18:38:00,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1217 2024-11-13T18:38:00,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1459 2024-11-13T18:38:00,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1459) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-13T18:38:00,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,106 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,106 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,106 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1218 2024-11-13T18:38:00,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,106 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,106 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,106 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-13T18:38:00,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,107 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,107 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1219 2024-11-13T18:38:00,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,107 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,107 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-13T18:38:00,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table581 2024-11-13T18:38:00,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table581) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-13T18:38:00,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,110 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table340 2024-11-13T18:38:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table582 2024-11-13T18:38:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table582) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-13T18:38:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table341 2024-11-13T18:38:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table583 2024-11-13T18:38:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table583) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-13T18:38:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-13T18:38:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,114 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,114 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,114 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table342 2024-11-13T18:38:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,114 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,114 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,115 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table584 2024-11-13T18:38:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table584) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,115 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-13T18:38:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-13T18:38:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table343 2024-11-13T18:38:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table585 2024-11-13T18:38:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table585) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,118 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-13T18:38:00,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,118 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table344 2024-11-13T18:38:00,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table586 2024-11-13T18:38:00,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table586) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1440 2024-11-13T18:38:00,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,120 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1440) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-13T18:38:00,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table345 2024-11-13T18:38:00,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,122 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table587 2024-11-13T18:38:00,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,122 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table587) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1441 2024-11-13T18:38:00,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1441) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,123 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-13T18:38:00,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,123 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table346 2024-11-13T18:38:00,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,124 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table588 2024-11-13T18:38:00,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,124 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table588) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1200 2024-11-13T18:38:00,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1442 2024-11-13T18:38:00,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1442) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-13T18:38:00,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table347 2024-11-13T18:38:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table589 2024-11-13T18:38:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,127 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table589) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-13T18:38:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,128 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,128 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,128 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table348 2024-11-13T18:38:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,128 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,128 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,128 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-13T18:38:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table349 2024-11-13T18:38:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-13T18:38:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-13T18:38:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1209 2024-11-13T18:38:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1201 2024-11-13T18:38:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1443 2024-11-13T18:38:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1443) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1202 2024-11-13T18:38:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,133 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1444 2024-11-13T18:38:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,133 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1444) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1203 2024-11-13T18:38:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1445 2024-11-13T18:38:00,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1445) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-13T18:38:00,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,135 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,135 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1204 2024-11-13T18:38:00,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,136 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,136 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1446 2024-11-13T18:38:00,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,138 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,138 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1446) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,138 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-13T18:38:00,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1205 2024-11-13T18:38:00,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1447 2024-11-13T18:38:00,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,140 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,140 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1447) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,140 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-13T18:38:00,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1206 2024-11-13T18:38:00,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1448 2024-11-13T18:38:00,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1448) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-13T18:38:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,142 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table590 2024-11-13T18:38:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table590) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1207 2024-11-13T18:38:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1449 2024-11-13T18:38:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1449) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-13T18:38:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table591 2024-11-13T18:38:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table591) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1208 2024-11-13T18:38:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-13T18:38:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,146 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,146 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,146 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table570 2024-11-13T18:38:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,146 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,146 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table570) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,146 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-13T18:38:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table571 2024-11-13T18:38:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table571) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-13T18:38:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table330 2024-11-13T18:38:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table572 2024-11-13T18:38:00,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table572) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-13T18:38:00,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,151 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table331 2024-11-13T18:38:00,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,151 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table573 2024-11-13T18:38:00,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table573) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-13T18:38:00,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,153 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table332 2024-11-13T18:38:00,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,153 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table574 2024-11-13T18:38:00,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table574) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-13T18:38:00,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table333 2024-11-13T18:38:00,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table575 2024-11-13T18:38:00,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table575) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table334 2024-11-13T18:38:00,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,157 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table576 2024-11-13T18:38:00,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table576) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1430 2024-11-13T18:38:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table335 2024-11-13T18:38:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,159 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table577 2024-11-13T18:38:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,159 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table577) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,159 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1431 2024-11-13T18:38:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,159 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,159 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table336 2024-11-13T18:38:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,160 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,160 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,160 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table578 2024-11-13T18:38:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,160 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,160 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table578) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,161 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table337 2024-11-13T18:38:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,161 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,161 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,161 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table579 2024-11-13T18:38:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table579) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table338 2024-11-13T18:38:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table339 2024-11-13T18:38:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,163 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,163 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1432 2024-11-13T18:38:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,163 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1432) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,163 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1433 2024-11-13T18:38:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,164 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1433) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1434 2024-11-13T18:38:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,164 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1434) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1435 2024-11-13T18:38:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1435) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,165 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-13T18:38:00,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,165 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1436 2024-11-13T18:38:00,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,166 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1436) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-13T18:38:00,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,166 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1437 2024-11-13T18:38:00,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,167 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,167 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1437) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,167 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-13T18:38:00,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,167 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,167 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,167 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1438 2024-11-13T18:38:00,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1438) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-13T18:38:00,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table580 2024-11-13T18:38:00,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table580) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,169 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1439 2024-11-13T18:38:00,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1439) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,169 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table284 2024-11-13T18:38:00,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,170 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,170 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table285 2024-11-13T18:38:00,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table286 2024-11-13T18:38:00,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table287 2024-11-13T18:38:00,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table288 2024-11-13T18:38:00,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table289 2024-11-13T18:38:00,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,173 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table290 2024-11-13T18:38:00,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,174 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,174 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,174 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table291 2024-11-13T18:38:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,174 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,174 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,174 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table292 2024-11-13T18:38:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,175 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,175 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table293 2024-11-13T18:38:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,176 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,176 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,176 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table294 2024-11-13T18:38:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,176 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,176 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,176 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table273 2024-11-13T18:38:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,177 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,177 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,177 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table274 2024-11-13T18:38:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,177 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,177 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,177 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table275 2024-11-13T18:38:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,178 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,178 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,178 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table276 2024-11-13T18:38:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,179 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,179 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,179 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table277 2024-11-13T18:38:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,179 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,179 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,179 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table278 2024-11-13T18:38:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,180 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,180 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,180 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table279 2024-11-13T18:38:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,181 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,181 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,181 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table280 2024-11-13T18:38:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,181 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,181 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,181 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table281 2024-11-13T18:38:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,182 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,182 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,182 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table282 2024-11-13T18:38:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,182 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,182 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,184 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table283 2024-11-13T18:38:00,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,185 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,185 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,185 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table295 2024-11-13T18:38:00,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,185 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,185 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,185 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table296 2024-11-13T18:38:00,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,186 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,186 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,186 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table297 2024-11-13T18:38:00,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,186 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,186 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,186 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table298 2024-11-13T18:38:00,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,187 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,187 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,187 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table299 2024-11-13T18:38:00,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,187 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,187 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,187 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table920 2024-11-13T18:38:00,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,188 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,188 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table920) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,188 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table921 2024-11-13T18:38:00,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,189 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,189 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table921) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,189 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table922 2024-11-13T18:38:00,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,189 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,189 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table922) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,189 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table923 2024-11-13T18:38:00,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,190 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table923) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,190 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table924 2024-11-13T18:38:00,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,190 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table924) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,190 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table925 2024-11-13T18:38:00,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table925) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,191 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table926 2024-11-13T18:38:00,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table926) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,191 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table927 2024-11-13T18:38:00,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table927) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,192 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table928 2024-11-13T18:38:00,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,192 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,192 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table928) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,192 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table929 2024-11-13T18:38:00,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,193 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table929) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,193 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1094 2024-11-13T18:38:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,193 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1094) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,193 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1095 2024-11-13T18:38:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,194 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,194 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1095) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,194 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1096 2024-11-13T18:38:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,194 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,194 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1096) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,194 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1097 2024-11-13T18:38:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,194 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,194 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1097) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,194 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1098 2024-11-13T18:38:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,195 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,195 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1098) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,195 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1099 2024-11-13T18:38:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,195 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,195 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1099) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,195 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table910 2024-11-13T18:38:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table910) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table911 2024-11-13T18:38:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table911) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table912 2024-11-13T18:38:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,201 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table912) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1090 2024-11-13T18:38:00,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,201 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1090) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table913 2024-11-13T18:38:00,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,202 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table913) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,202 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1091 2024-11-13T18:38:00,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,202 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1091) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,202 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table914 2024-11-13T18:38:00,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table914) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1092 2024-11-13T18:38:00,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1092) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table915 2024-11-13T18:38:00,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,204 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,204 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table915) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,204 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1093 2024-11-13T18:38:00,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,204 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,204 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1093) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,204 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table916 2024-11-13T18:38:00,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table916) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table917 2024-11-13T18:38:00,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table917) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table918 2024-11-13T18:38:00,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,206 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,206 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table918) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,206 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table919 2024-11-13T18:38:00,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table919) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1083 2024-11-13T18:38:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,213 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,213 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1083) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,213 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1084 2024-11-13T18:38:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,213 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,213 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1084) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,213 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1085 2024-11-13T18:38:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1085) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1086 2024-11-13T18:38:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1086) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1087 2024-11-13T18:38:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,216 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,216 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1087) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,216 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1088 2024-11-13T18:38:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,216 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,216 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1088) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,216 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1089 2024-11-13T18:38:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,217 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1089) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table940 2024-11-13T18:38:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,217 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table940) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table941 2024-11-13T18:38:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,218 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table941) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,218 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table700 2024-11-13T18:38:00,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,218 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table700) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,218 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table942 2024-11-13T18:38:00,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table942) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table701 2024-11-13T18:38:00,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table701) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table943 2024-11-13T18:38:00,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table943) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table702 2024-11-13T18:38:00,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table702) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table944 2024-11-13T18:38:00,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table944) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table703 2024-11-13T18:38:00,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table703) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table945 2024-11-13T18:38:00,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table945) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table704 2024-11-13T18:38:00,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table704) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table946 2024-11-13T18:38:00,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table946) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1080 2024-11-13T18:38:00,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1080) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table705 2024-11-13T18:38:00,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table705) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table947 2024-11-13T18:38:00,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table947) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1081 2024-11-13T18:38:00,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1081) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,225 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table706 2024-11-13T18:38:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table706) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,225 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table948 2024-11-13T18:38:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table948) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,225 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1082 2024-11-13T18:38:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,226 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,226 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1082) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,226 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table707 2024-11-13T18:38:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,227 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table707) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,227 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table949 2024-11-13T18:38:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,227 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table949) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,227 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table708 2024-11-13T18:38:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,227 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table708) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,227 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table709 2024-11-13T18:38:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,228 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,228 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table709) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,228 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1072 2024-11-13T18:38:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,228 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,228 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1072) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,228 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1073 2024-11-13T18:38:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,229 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,229 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1073) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,229 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1074 2024-11-13T18:38:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,229 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,229 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1074) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,229 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1075 2024-11-13T18:38:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,230 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,230 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1075) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,230 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1076 2024-11-13T18:38:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,230 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,230 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1076) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,230 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1077 2024-11-13T18:38:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1077) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,231 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1078 2024-11-13T18:38:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1078) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,231 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1079 2024-11-13T18:38:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1079) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,232 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table930 2024-11-13T18:38:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table930) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,232 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table931 2024-11-13T18:38:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table931) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,233 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table932 2024-11-13T18:38:00,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,233 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table932) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,233 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table933 2024-11-13T18:38:00,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,234 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,234 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table933) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,234 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table934 2024-11-13T18:38:00,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,234 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,234 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table934) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,234 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table935 2024-11-13T18:38:00,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,235 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,235 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table935) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,235 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table936 2024-11-13T18:38:00,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,235 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,235 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table936) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,235 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1070 2024-11-13T18:38:00,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,235 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,235 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1070) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,236 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table937 2024-11-13T18:38:00,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,236 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table937) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,236 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1071 2024-11-13T18:38:00,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,236 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1071) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,236 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table938 2024-11-13T18:38:00,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,237 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,237 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table938) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,237 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table939 2024-11-13T18:38:00,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,237 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,237 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table939) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,237 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table900 2024-11-13T18:38:00,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,238 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,238 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table900) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,238 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table901 2024-11-13T18:38:00,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,238 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,238 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table901) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,238 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table902 2024-11-13T18:38:00,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table902) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,239 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table903 2024-11-13T18:38:00,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table903) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,239 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table904 2024-11-13T18:38:00,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,240 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,240 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table904) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,240 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table905 2024-11-13T18:38:00,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,242 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,242 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table905) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,243 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table906 2024-11-13T18:38:00,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,243 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,243 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table906) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,243 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table907 2024-11-13T18:38:00,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,244 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table907) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,244 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table908 2024-11-13T18:38:00,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,244 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table908) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,244 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table909 2024-11-13T18:38:00,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv225233530=2, srv1598014012=0, srv348367074=3, srv7611099=4, srv1900528152=1, srv84863110=5} racks are {rack=0} 2024-11-13T18:38:00,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-13T18:38:00,245 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,245 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table909) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
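[Editor's note] The records above repeat the same decision for table901 through table909: the balancer computes a weighted average of the per-cost-function imbalances and, because it does not exceed the threshold of 1.0, skips generating a balance plan. The following is a minimal Java sketch of that decision rule as described by the log message itself; it is not the actual StochasticLoadBalancer code, and the CostFunction record, method names, and sample values are illustrative assumptions.

// Minimal sketch (not the real StochasticLoadBalancer implementation) of the
// check the log keeps reporting: weighted average imbalance vs. the
// hbase.master.balancer.stochastic.minCostNeedBalance threshold (1.0 in this run).
import java.util.List;

public class MinCostNeedBalanceSketch {
  // Illustrative stand-in for a cost function's multiplier and current imbalance.
  record CostFunction(String name, double multiplier, double imbalance) {}

  static boolean needsBalance(List<CostFunction> costs, double minCostNeedBalance) {
    double weighted = 0.0;
    double totalMultiplier = 0.0;
    for (CostFunction c : costs) {
      weighted += c.multiplier() * c.imbalance();
      totalMultiplier += c.multiplier();
    }
    double weightedAverageImbalance = totalMultiplier == 0 ? 0 : weighted / totalMultiplier;
    // Mirrors the logged wording: "skipping load balancing because
    // weighted average imbalance=... <= threshold(...)".
    return weightedAverageImbalance > minCostNeedBalance;
  }

  public static void main(String[] args) {
    List<CostFunction> costs = List.of(
        new CostFunction("RegionCountSkewCostFunction", 500.0, 0.0),
        new CostFunction("MoveCostFunction", 7.0, 0.0),
        new CostFunction("TableSkewCostFunction", 35.0, 0.0));
    // With every imbalance at 0.0, the weighted average is 0.0 <= 1.0,
    // so balancing is skipped, exactly as logged for table901..table909.
    System.out.println(needsBalance(costs, 1.0)); // prints: false
  }
}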
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,246 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-13T18:38:00,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv264752525=10, srv1608821297=3, srv1923369658=7, srv1927744907=8, srv580670855=11, srv1731734699=5, srv1772812924=6, srv1517478120=2, srv1609587645=4, srv763958052=12, srv1299922290=0, srv884380239=14, srv1340598289=1, srv861901799=13, srv194508724=9} racks are {rack=0} 2024-11-13T18:38:00,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:00,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:00,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:00,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:00,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:00,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 6 is on rack 0 2024-11-13T18:38:00,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:00,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:00,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:00,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:00,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:00,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-13T18:38:00,247 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,247 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,247 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-13T18:38:00,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv264752525=10, srv1608821297=3, srv1923369658=7, srv1927744907=8, srv580670855=11, srv1731734699=5, srv1772812924=6, srv1517478120=2, srv1609587645=4, srv763958052=12, srv1299922290=0, srv884380239=14, srv1340598289=1, srv861901799=13, srv194508724=9} racks are {rack=0} 2024-11-13T18:38:00,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,248 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:00,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:00,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:00,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:00,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:00,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:00,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:00,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:00,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:00,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:00,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-13T18:38:00,248 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,248 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,248 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:38:00,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv264752525=10, srv1608821297=3, srv1923369658=7, srv1927744907=8, srv580670855=11, srv1731734699=5, srv1772812924=6, srv1517478120=2, srv1609587645=4, srv763958052=12, srv1299922290=0, srv884380239=14, srv1340598289=1, srv861901799=13, srv194508724=9} racks are {rack=0} 2024-11-13T18:38:00,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:00,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:00,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:00,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:00,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:00,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 6 is on rack 0 2024-11-13T18:38:00,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:00,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:00,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:00,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:00,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:00,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-13T18:38:00,250 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,250 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,250 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:38:00,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv264752525=10, srv1608821297=3, srv1923369658=7, srv1927744907=8, srv580670855=11, srv1731734699=5, srv1772812924=6, srv1517478120=2, srv1609587645=4, srv763958052=12, srv1299922290=0, srv884380239=14, srv1340598289=1, srv861901799=13, srv194508724=9} racks are {rack=0} 2024-11-13T18:38:00,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,252 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:00,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:00,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:00,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:00,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:00,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:00,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:00,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:00,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:00,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:00,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-13T18:38:00,254 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,254 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,254 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:38:00,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv264752525=10, srv1608821297=3, srv1923369658=7, srv1927744907=8, srv580670855=11, srv1731734699=5, srv1772812924=6, srv1517478120=2, srv1609587645=4, srv763958052=12, srv1299922290=0, srv884380239=14, srv1340598289=1, srv861901799=13, srv194508724=9} racks are {rack=0} 2024-11-13T18:38:00,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:00,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 6 is on rack 0 2024-11-13T18:38:00,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:00,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:00,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:00,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:00,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-13T18:38:00,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:38:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv264752525=10, srv1608821297=3, srv1923369658=7, srv1927744907=8, srv580670855=11, srv1731734699=5, srv1772812924=6, srv1517478120=2, srv1609587645=4, srv763958052=12, srv1299922290=0, srv884380239=14, srv1340598289=1, srv861901799=13, srv194508724=9} racks are {rack=0} 2024-11-13T18:38:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,257 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:00,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:00,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:00,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:00,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:00,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-13T18:38:00,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:38:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv264752525=10, srv1608821297=3, srv1923369658=7, srv1927744907=8, srv580670855=11, srv1731734699=5, srv1772812924=6, srv1517478120=2, srv1609587645=4, srv763958052=12, srv1299922290=0, srv884380239=14, srv1340598289=1, srv861901799=13, srv194508724=9} racks are {rack=0} 2024-11-13T18:38:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:00,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 6 is on rack 0 2024-11-13T18:38:00,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:00,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:00,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:00,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:00,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-13T18:38:00,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,259 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:38:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv264752525=10, srv1608821297=3, srv1923369658=7, srv1927744907=8, srv580670855=11, srv1731734699=5, srv1772812924=6, srv1517478120=2, srv1609587645=4, srv763958052=12, srv1299922290=0, srv884380239=14, srv1340598289=1, srv861901799=13, srv194508724=9} racks are {rack=0} 2024-11-13T18:38:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,260 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:00,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-13T18:38:00,261 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,261 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,261 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-13T18:38:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv264752525=10, srv1608821297=3, srv1923369658=7, srv1927744907=8, srv580670855=11, srv1731734699=5, srv1772812924=6, srv1517478120=2, srv1609587645=4, srv763958052=12, srv1299922290=0, srv884380239=14, srv1340598289=1, srv861901799=13, srv194508724=9} racks are {rack=0} 2024-11-13T18:38:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:00,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:00,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 6 is on rack 0 2024-11-13T18:38:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:00,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-13T18:38:00,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-13T18:38:00,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv264752525=10, srv1608821297=3, srv1923369658=7, srv1927744907=8, srv580670855=11, srv1731734699=5, srv1772812924=6, srv1517478120=2, srv1609587645=4, srv763958052=12, srv1299922290=0, srv884380239=14, srv1340598289=1, srv861901799=13, srv194508724=9} racks are {rack=0} 2024-11-13T18:38:00,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,263 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:00,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:00,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:00,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:00,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:00,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-13T18:38:00,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,264 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,264 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-13T18:38:00,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv264752525=10, srv1608821297=3, srv1923369658=7, srv1927744907=8, srv580670855=11, srv1731734699=5, srv1772812924=6, srv1517478120=2, srv1609587645=4, srv763958052=12, srv1299922290=0, srv884380239=14, srv1340598289=1, srv861901799=13, srv194508724=9} racks are {rack=0} 2024-11-13T18:38:00,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:00,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:00,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:00,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:00,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 6 is on rack 0 2024-11-13T18:38:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:00,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-13T18:38:00,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,265 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,265 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-13T18:38:00,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv264752525=10, srv1608821297=3, srv1923369658=7, srv1927744907=8, srv580670855=11, srv1731734699=5, srv1772812924=6, srv1517478120=2, srv1609587645=4, srv763958052=12, srv1299922290=0, srv884380239=14, srv1340598289=1, srv861901799=13, srv194508724=9} racks are {rack=0} 2024-11-13T18:38:00,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,265 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:00,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:00,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:00,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:00,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:00,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-13T18:38:00,266 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-13T18:38:00,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv264752525=10, srv1608821297=3, srv1923369658=7, srv1927744907=8, srv580670855=11, srv1731734699=5, srv1772812924=6, srv1517478120=2, srv1609587645=4, srv763958052=12, srv1299922290=0, srv884380239=14, srv1340598289=1, srv861901799=13, srv194508724=9} racks are {rack=0} 2024-11-13T18:38:00,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:00,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:00,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:00,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:00,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 6 is on rack 0 2024-11-13T18:38:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:00,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-13T18:38:00,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,267 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,267 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-13T18:38:00,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv264752525=10, srv1608821297=3, srv1923369658=7, srv1927744907=8, srv580670855=11, srv1731734699=5, srv1772812924=6, srv1517478120=2, srv1609587645=4, srv763958052=12, srv1299922290=0, srv884380239=14, srv1340598289=1, srv861901799=13, srv194508724=9} racks are {rack=0} 2024-11-13T18:38:00,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,268 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:00,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:00,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:00,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:00,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:00,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:00,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:00,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:00,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:00,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:00,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-13T18:38:00,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-13T18:38:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv264752525=10, srv1608821297=3, srv1923369658=7, srv1927744907=8, srv580670855=11, srv1731734699=5, srv1772812924=6, srv1517478120=2, srv1609587645=4, srv763958052=12, srv1299922290=0, srv884380239=14, srv1340598289=1, srv861901799=13, srv194508724=9} racks are {rack=0} 2024-11-13T18:38:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:00,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 6 is on rack 0 2024-11-13T18:38:00,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:00,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:00,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:00,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:00,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-13T18:38:00,270 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:38:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv240698875=6, srv1197671221=2, srv1030365313=1, srv1416053304=3, srv258394708=7, srv647683494=9, srv625861434=8, srv1025701755=0, srv1734065943=4, srv1745791493=5} racks are {rack=0} 2024-11-13T18:38:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,271 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,271 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:38:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv240698875=6, srv1197671221=2, srv1030365313=1, srv1416053304=3, srv258394708=7, srv647683494=9, srv625861434=8, srv1025701755=0, srv1734065943=4, srv1745791493=5} racks are {rack=0} 2024-11-13T18:38:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
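[Editor's note, not part of the captured log] The repeated "weighted average imbalance=0.0 <= threshold(1.0)" decision above can be reproduced from the functionCost listing itself. The sketch below is illustrative only: it assumes the weighted average is the multiplier-weighted mean of the per-function imbalance values printed in the log, and the class and method names are hypothetical, not HBase internals.

// Illustrative sketch only: recomputes the "weighted average imbalance" that the
// StochasticLoadBalancer log line compares against minCostNeedBalance, assuming it
// is the multiplier-weighted mean of the per-function imbalance values from the
// functionCost listing above. Names here are hypothetical, not HBase internals.
import java.util.LinkedHashMap;
import java.util.Map;

public class WeightedImbalanceSketch {

    // Returns sum(multiplier_i * imbalance_i) / sum(multiplier_i).
    static double weightedAverageImbalance(Map<String, double[]> functionCost) {
        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (double[] cost : functionCost.values()) {
            double multiplier = cost[0];
            double imbalance = cost[1];
            weightedSum += multiplier * imbalance;
            multiplierSum += multiplier;
        }
        return multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;
    }

    public static void main(String[] args) {
        // Values copied from the functionCost entries in the log; functions logged as
        // "(not needed)" contribute nothing and are omitted.
        Map<String, double[]> functionCost = new LinkedHashMap<>();
        functionCost.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        functionCost.put("MoveCostFunction", new double[] {7.0, 0.0});
        functionCost.put("RackLocalityCostFunction", new double[] {15.0, 0.0});
        functionCost.put("TableSkewCostFunction", new double[] {35.0, 0.0});
        functionCost.put("ReadRequestCostFunction", new double[] {5.0, 0.0});
        functionCost.put("WriteRequestCostFunction", new double[] {5.0, 0.0});
        functionCost.put("MemStoreSizeCostFunction", new double[] {5.0, 0.0});
        functionCost.put("StoreFileCostFunction", new double[] {5.0, 0.0});

        double imbalance = weightedAverageImbalance(functionCost);
        double minCostNeedBalance = 1.0; // "threshold(1.0)" in the log above

        // With every reported imbalance at 0.0 the weighted average is 0.0 <= 1.0,
        // which is why each table's balance plan generation is skipped.
        System.out.printf("weighted average imbalance=%.1f, balance needed=%b%n",
            imbalance, imbalance > minCostNeedBalance);
    }
}

Under this assumption, every table in the run above yields 0.0 because all individual imbalances are 0.0, so the balancer skips plan generation regardless of the multipliers.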
2024-11-13T18:38:00,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,273 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:38:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv240698875=6, srv1197671221=2, srv1030365313=1, srv1416053304=3, srv258394708=7, srv647683494=9, srv625861434=8, srv1025701755=0, srv1734065943=4, srv1745791493=5} racks are {rack=0} 2024-11-13T18:38:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,274 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:38:00,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv240698875=6, srv1197671221=2, srv1030365313=1, srv1416053304=3, srv258394708=7, srv647683494=9, srv625861434=8, srv1025701755=0, srv1734065943=4, srv1745791493=5} racks are {rack=0} 2024-11-13T18:38:00,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-13T18:38:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:38:00,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv240698875=6, srv1197671221=2, srv1030365313=1, srv1416053304=3, srv258394708=7, srv647683494=9, srv625861434=8, srv1025701755=0, srv1734065943=4, srv1745791493=5} racks are {rack=0} 2024-11-13T18:38:00,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,277 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,278 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,278 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,278 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:38:00,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv240698875=6, srv1197671221=2, srv1030365313=1, srv1416053304=3, srv258394708=7, srv647683494=9, srv625861434=8, srv1025701755=0, srv1734065943=4, srv1745791493=5} racks are {rack=0} 2024-11-13T18:38:00,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,280 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-13T18:38:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv240698875=6, srv1197671221=2, srv1030365313=1, srv1416053304=3, srv258394708=7, srv647683494=9, srv625861434=8, srv1025701755=0, srv1734065943=4, srv1745791493=5} racks are {rack=0} 2024-11-13T18:38:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
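[editor's note] The skip messages above recommend either lowering hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or raising the multiplier of a specific cost function to get more aggressive balancing. Below is a minimal sketch of doing that programmatically; only the minCostNeedBalance key appears verbatim in this log, and the regionCountCost key is an assumption inferred from the RegionCountSkewCostFunction name, not something confirmed by these entries.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  // Sketch of the tuning suggested by the "skipping load balancing" message above.
  public static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    // Key taken verbatim from the log message; lowering it from 1.0 lets balance
    // plans be generated even when the weighted average imbalance is small.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    // Assumed property name for the RegionCountSkewCostFunction multiplier
    // (500.0 in the functionCost dump); the key itself does not appear in this log.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
    return conf;
  }
}
```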
2024-11-13T18:38:00,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-13T18:38:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv240698875=6, srv1197671221=2, srv1030365313=1, srv1416053304=3, srv258394708=7, srv647683494=9, srv625861434=8, srv1025701755=0, srv1734065943=4, srv1745791493=5} racks are {rack=0} 2024-11-13T18:38:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,282 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,282 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,282 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,282 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-13T18:38:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv240698875=6, srv1197671221=2, srv1030365313=1, srv1416053304=3, srv258394708=7, srv647683494=9, srv625861434=8, srv1025701755=0, srv1734065943=4, srv1745791493=5} racks are {rack=0} 2024-11-13T18:38:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-13T18:38:00,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,283 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,283 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,283 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-13T18:38:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv240698875=6, srv1197671221=2, srv1030365313=1, srv1416053304=3, srv258394708=7, srv647683494=9, srv625861434=8, srv1025701755=0, srv1734065943=4, srv1745791493=5} racks are {rack=0} 2024-11-13T18:38:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,283 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,283 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,283 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,288 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:38:00,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1440179715=0, srv1611145785=1, srv225859686=7, srv2053142490=5, srv992325550=9, srv204937461=4, srv2115260670=6, srv1966119527=3, srv1895492757=2, srv596694328=8} racks are {rack=0} 2024-11-13T18:38:00,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,291 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,291 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,291 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:38:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1440179715=0, srv1611145785=1, srv225859686=7, srv2053142490=5, srv992325550=9, srv204937461=4, srv2115260670=6, srv1966119527=3, srv1895492757=2, srv596694328=8} racks are {rack=0} 2024-11-13T18:38:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
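[editor's note] Each skip decision above compares a "weighted average imbalance" against the threshold, and the functionCost dump lists every cost function with its multiplier and imbalance. The following is an illustrative, self-contained sketch (not the actual HBase implementation) of how such a weighted average could be combined from the logged (multiplier, imbalance) pairs; with every imbalance at 0.0 the average is 0.0, which is why balancing is skipped.

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class WeightedImbalanceSketch {
  /** Illustrative combination: sum(multiplier * imbalance) / sum(multiplier). */
  static double weightedAverageImbalance(Map<String, double[]> functionCost) {
    double weighted = 0.0, totalWeight = 0.0;
    for (double[] mi : functionCost.values()) {
      weighted += mi[0] * mi[1];   // multiplier * imbalance
      totalWeight += mi[0];
    }
    return totalWeight == 0.0 ? 0.0 : weighted / totalWeight;
  }

  public static void main(String[] args) {
    // Multipliers copied from the functionCost dump above; every imbalance is 0.0.
    Map<String, double[]> fc = new LinkedHashMap<>();
    fc.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
    fc.put("MoveCostFunction", new double[] {7.0, 0.0});
    fc.put("RackLocalityCostFunction", new double[] {15.0, 0.0});
    fc.put("TableSkewCostFunction", new double[] {35.0, 0.0});
    fc.put("ReadRequestCostFunction", new double[] {5.0, 0.0});
    double avg = weightedAverageImbalance(fc);
    System.out.printf("weighted average imbalance=%.1f <= threshold(1.0)? %b%n", avg, avg <= 1.0);
  }
}
```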
2024-11-13T18:38:00,293 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,293 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:38:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1440179715=0, srv1611145785=1, srv225859686=7, srv2053142490=5, srv992325550=9, srv204937461=4, srv2115260670=6, srv1966119527=3, srv1895492757=2, srv596694328=8} racks are {rack=0} 2024-11-13T18:38:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,294 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,294 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,294 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:38:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1440179715=0, srv1611145785=1, srv225859686=7, srv2053142490=5, srv992325550=9, srv204937461=4, srv2115260670=6, srv1966119527=3, srv1895492757=2, srv596694328=8} racks are {rack=0} 2024-11-13T18:38:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-13T18:38:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,297 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,297 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,297 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:38:00,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1440179715=0, srv1611145785=1, srv225859686=7, srv2053142490=5, srv992325550=9, srv204937461=4, srv2115260670=6, srv1966119527=3, srv1895492757=2, srv596694328=8} racks are {rack=0} 2024-11-13T18:38:00,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,298 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,299 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,299 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:38:00,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1440179715=0, srv1611145785=1, srv225859686=7, srv2053142490=5, srv992325550=9, srv204937461=4, srv2115260670=6, srv1966119527=3, srv1895492757=2, srv596694328=8} racks are {rack=0} 2024-11-13T18:38:00,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,302 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,302 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-13T18:38:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,304 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
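[editor's note] The "Hosts are {...} racks are {rack=0}" entries above describe the BalancerClusterState view of this test cluster: ten region servers, each mapped to its own host index, all on a single rack. The sketch below just replays one of the logged host maps to make those indices explicit; the server names and values are copied from the table13 entry above, and no HBase classes are involved.

```java
import java.util.Map;

public class ClusterStateSketch {
  public static void main(String[] args) {
    // Host map copied from the "Hosts are {...}" entry above; value = host index.
    Map<String, Integer> hosts = Map.of(
        "srv323816786", 7, "srv149728370", 1, "srv2028514452", 5, "srv2079899920", 6,
        "srv1618292183", 3, "srv8488808", 9, "srv1573442958", 2, "srv1948491285", 4,
        "srv1381900780", 0, "srv364856026", 8);
    // All servers share one rack, so the rack index is always 0.
    hosts.forEach((server, host) ->
        System.out.println(server + " -> host " + host + ", rack 0"));
    System.out.println("Number of hosts=" + hosts.size() + ", number of racks=1");
  }
}
```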
2024-11-13T18:38:00,304 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-13T18:38:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,314 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,314 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,314 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,314 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-13T18:38:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-13T18:38:00,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,315 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,315 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,315 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-13T18:38:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,315 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,315 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,315 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,315 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-13T18:38:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-13T18:38:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,322 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
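Note: each "skipping load balancing" record above compares a weighted average imbalance against the threshold hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run), built from the per-cost-function multipliers and imbalances printed in the functionCost= list. The following is only a minimal illustrative sketch of that comparison, using the numbers from the log lines; it is not the actual StochasticLoadBalancer code, and the class name is made up for the example.

// Sketch only: combine per-cost-function multipliers and imbalances into a
// weighted average and compare it against minCostNeedBalance, mirroring the
// "skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0)" message.
public class WeightedImbalanceSketch {
    public static void main(String[] args) {
        // Multipliers/imbalances as reported in the functionCost= lines above;
        // cost functions marked "(not needed)" are simply omitted here.
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {  0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0, 0.0};

        double weightedSum = 0.0;
        double totalWeight = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            weightedSum += multipliers[i] * imbalances[i];
            totalWeight += multipliers[i];
        }
        double weightedAverageImbalance = totalWeight == 0.0 ? 0.0 : weightedSum / totalWeight;

        double minCostNeedBalance = 1.0; // threshold quoted in the log message
        if (weightedAverageImbalance <= minCostNeedBalance) {
            System.out.println("skipping load balancing because weighted average imbalance="
                    + weightedAverageImbalance + " <= threshold(" + minCostNeedBalance + ")");
        }
    }
}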
2024-11-13T18:38:00,322 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,322 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-13T18:38:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,323 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,324 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-13T18:38:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-13T18:38:00,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,324 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,324 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,324 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-13T18:38:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,324 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-13T18:38:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-13T18:38:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,326 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
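Note: the "Hosts are {srvXXXX=N, ...} racks are {rack=0}" and "server N is on host N / is on rack 0" records above describe a simple index mapping: each server name is assigned a host index, and in this test every host sits on the single rack index 0. The sketch below shows one assumed way to read that mapping; it is not HBase's BalancerClusterState implementation, and the class name is hypothetical.

// Sketch only: build server -> host-index and rack -> index maps matching the
// cluster-state lines logged above (10 hosts, 1 rack).
import java.util.LinkedHashMap;
import java.util.Map;

public class ClusterIndexSketch {
    public static void main(String[] args) {
        String[] servers = {"srv1381900780", "srv149728370", "srv1573442958", "srv1618292183",
                "srv1948491285", "srv2028514452", "srv2079899920", "srv323816786",
                "srv364856026", "srv8488808"};

        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        Map<String, Integer> rackIndex = new LinkedHashMap<>();
        rackIndex.put("rack", 0); // only one rack in this run

        for (int i = 0; i < servers.length; i++) {
            hostIndex.put(servers[i], i); // server i is on host i, as in the log
        }

        hostIndex.forEach((srv, host) ->
                System.out.println("server " + host + " (" + srv + ") is on host " + host
                        + " and rack " + rackIndex.get("rack")));
    }
}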
2024-11-13T18:38:00,326 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,326 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-13T18:38:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,327 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-13T18:38:00,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-13T18:38:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-13T18:38:00,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,328 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,328 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,328 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,328 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-13T18:38:00,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,329 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,329 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-13T18:38:00,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
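Note: the log message repeatedly suggests lowering hbase.master.balancer.stochastic.minCostNeedBalance for more aggressive balancing. The sketch below shows that tuning expressed with Hadoop's Configuration API; it assumes hadoop-common is on the classpath, uses an arbitrary example value of 0.05, and in a real deployment the same key would normally be set in hbase-site.xml rather than programmatically.

// Sketch only: lower the balance threshold named in the log message above.
import org.apache.hadoop.conf.Configuration;

public class BalancerThresholdSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Default in this test run is 1.0; a lower value makes balancing more aggressive.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        System.out.println("minCostNeedBalance = "
                + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
    }
}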
2024-11-13T18:38:00,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-13T18:38:00,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,330 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-13T18:38:00,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-13T18:38:00,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,331 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,331 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-13T18:38:00,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,332 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,332 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,332 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,332 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-13T18:38:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,333 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,333 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,333 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-13T18:38:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,333 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,333 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,333 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-13T18:38:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,334 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,334 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,334 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,334 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-13T18:38:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-13T18:38:00,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,335 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,335 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,335 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-13T18:38:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,335 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,335 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,335 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,335 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-13T18:38:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,338 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,338 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,338 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-13T18:38:00,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,340 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-13T18:38:00,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,341 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-13T18:38:00,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-13T18:38:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,348 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,348 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-13T18:38:00,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,350 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,350 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,350 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,350 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:38:00,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:38:00,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,357 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,357 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,357 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:38:00,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,357 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,358 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,358 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,358 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:38:00,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,358 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,358 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,358 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:38:00,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,359 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,359 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,359 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,359 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:38:00,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
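The BalancerClusterState lines above repeatedly enumerate how each server index is assigned to a host index and a rack index ("Hosts are {...} racks are {rack=0}", "server N is on host N", "server N is on rack 0") before any cost is evaluated. As a rough illustration only, the following Java sketch builds that kind of index mapping from name-to-index maps; it mirrors what the log prints for this single-rack, one-host-per-server test topology and is not the actual BalancerClusterState implementation.

import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative sketch (not HBase code): derive server->host and server->rack
// index arrays, reproducing the "server N is on host N / rack 0" log lines.
public class ClusterTopologySketch {
  public static void main(String[] args) {
    // Assumed sample data: each server sits on its own host, all on one rack.
    String[] servers = {"srv1381900780", "srv149728370", "srv1573442958"};
    Map<String, Integer> hostsToIndex = new LinkedHashMap<>();
    Map<String, Integer> racksToIndex = new LinkedHashMap<>();
    int[] serverIndexToHostIndex = new int[servers.length];
    int[] serverIndexToRackIndex = new int[servers.length];

    for (int i = 0; i < servers.length; i++) {
      // One host per server in this test topology.
      serverIndexToHostIndex[i] = hostsToIndex.computeIfAbsent(servers[i], h -> hostsToIndex.size());
      // Every server maps to the single default rack.
      serverIndexToRackIndex[i] = racksToIndex.computeIfAbsent("rack", r -> racksToIndex.size());
      System.out.println("server " + i + " is on host " + serverIndexToHostIndex[i]
          + ", rack " + serverIndexToRackIndex[i]);
    }
    System.out.println("Number of hosts=" + hostsToIndex.size()
        + ", number of racks=" + racksToIndex.size());
  }
}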
2024-11-13T18:38:00,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,360 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,368 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,368 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-13T18:38:00,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
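Each "skipping load balancing" entry compares a weighted average imbalance against the threshold (1.0 in this run) using the per-function multipliers and imbalances listed after it. The short Java sketch below is a hedged approximation of that check, assuming the weighted average is simply the multiplier-weighted mean of the individual imbalances as the log wording suggests; the real StochasticLoadBalancer decision logic may differ in detail.

// Illustrative sketch only: approximate the "weighted average imbalance <= threshold"
// check using the multipliers/imbalances printed in the functionCost listing above.
public class NeedsBalanceSketch {
  public static void main(String[] args) {
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances  = {  0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0, 0.0};
    double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance

    double weightedSum = 0.0;
    double multiplierSum = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weightedSum += multipliers[i] * imbalances[i];
      multiplierSum += multipliers[i];
    }
    double weightedAverageImbalance = multiplierSum == 0 ? 0 : weightedSum / multiplierSum;

    if (weightedAverageImbalance <= minCostNeedBalance) {
      System.out.println("skipping load balancing: weighted average imbalance="
          + weightedAverageImbalance + " <= threshold(" + minCostNeedBalance + ")");
    } else {
      System.out.println("a balance plan would be generated");
    }
  }
}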
2024-11-13T18:38:00,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,369 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-13T18:38:00,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,370 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,370 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,370 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-13T18:38:00,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,370 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,370 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-13T18:38:00,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,371 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,371 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,371 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,371 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-13T18:38:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,373 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,373 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,373 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-13T18:38:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,374 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
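The skip message spells out the two tuning knobs: lower hbase.master.balancer.stochastic.minCostNeedBalance, or raise the relative multiplier of a specific cost function. Below is a minimal Java sketch of adjusting those settings through the standard Hadoop/HBase Configuration API; the minCostNeedBalance key is taken verbatim from the log, while the regionCountCost key is shown as an assumed example of a cost-function multiplier property.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Minimal sketch: make the stochastic balancer more aggressive, as the log
// message suggests. The minCostNeedBalance key appears verbatim above; the
// regionCountCost key is an assumed multiplier property, for illustration only.
public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Lower the threshold below which balancing is skipped (reported as 1.0 in this run).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Alternatively, increase a specific cost function's relative weight
    // (assumed key; 500.0 is the RegionCountSkewCostFunction multiplier in the log).
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000.0f);

    System.out.println("minCostNeedBalance="
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
  }
}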
2024-11-13T18:38:00,374 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,374 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-13T18:38:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,374 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,374 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,374 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,374 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-13T18:38:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,375 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,375 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,375 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-13T18:38:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,375 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,375 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,375 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,375 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-13T18:38:00,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,384 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,385 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,385 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-13T18:38:00,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,385 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,385 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,385 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-13T18:38:00,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,386 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,386 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,386 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,386 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-13T18:38:00,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-13T18:38:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,387 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,387 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,387 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-13T18:38:00,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,387 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,387 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,387 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,387 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-13T18:38:00,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,395 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,395 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,395 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-13T18:38:00,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,396 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
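Note: the cluster-state entries above (the "Hosts are {...} racks are {rack=0}" map, the per-server "is on host N" / "is on rack 0" lines, and the "Number of tables=1, number of hosts=10, number of racks=1" summary) describe a 10-server layout where each server occupies its own host and every host sits on the single rack index 0. The sketch below is only an illustration of building such index mappings from the hostnames printed in the log; it is not the actual BalancerClusterState code, and the class and variable names are invented for this example.

```java
// Illustrative sketch only: mirrors the host/rack assignment lines in the log above.
// Not the actual org.apache.hadoop.hbase.master.balancer.BalancerClusterState code.
import java.util.LinkedHashMap;
import java.util.Map;

public class ClusterTopologySketch {
  public static void main(String[] args) {
    // hostname -> host index, in the index order printed in the "Hosts are {...}" line
    Map<String, Integer> hostsToIndex = new LinkedHashMap<>();
    String[] hosts = {"srv1381900780", "srv149728370", "srv1573442958", "srv1618292183",
        "srv1948491285", "srv2028514452", "srv2079899920", "srv323816786",
        "srv364856026", "srv8488808"};
    for (int i = 0; i < hosts.length; i++) {
      hostsToIndex.put(hosts[i], i);
    }

    int numServers = hostsToIndex.size();
    int[] serverToHost = new int[numServers];
    int[] serverToRack = new int[numServers];
    for (int server = 0; server < numServers; server++) {
      serverToHost[server] = server; // each server is its own host in this test layout
      serverToRack[server] = 0;      // single rack: {rack=0}
      System.out.printf("server %d is on host %d, rack %d%n",
          server, serverToHost[server], serverToRack[server]);
    }
    System.out.println("Number of tables=1, number of hosts=" + numServers
        + ", number of racks=1");
  }
}
```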
2024-11-13T18:38:00,397 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,397 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-13T18:38:00,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,397 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,397 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,397 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,397 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-13T18:38:00,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-13T18:38:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,398 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,398 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,398 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-13T18:38:00,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323816786=7, srv149728370=1, srv2028514452=5, srv2079899920=6, srv1618292183=3, srv8488808=9, srv1573442958=2, srv1948491285=4, srv1381900780=0, srv364856026=8} racks are {rack=0} 2024-11-13T18:38:00,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,398 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,399 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,399 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,404 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-13T18:38:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,405 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-13T18:38:00,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
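Note: the "skipping load balancing" entries above compare a weighted average imbalance against threshold(1.0), i.e. hbase.master.balancer.stochastic.minCostNeedBalance, using the multiplier/imbalance pairs listed under functionCost. The sketch below shows one way such a multiplier-weighted average could be computed from the values printed in the log, assuming that entries marked "(not needed)" are excluded from the average; it is an illustration under that assumption, not the actual StochasticLoadBalancer implementation.

```java
// Hedged sketch: recomputes a multiplier-weighted average imbalance from the
// functionCost values printed above. Not the real StochasticLoadBalancer code.
import java.util.LinkedHashMap;
import java.util.Map;

public class WeightedImbalanceSketch {
  public static void main(String[] args) {
    // cost function -> {multiplier, imbalance}, as printed in the functionCost line;
    // "(not needed)" cost functions are omitted here (an assumption of this sketch).
    Map<String, double[]> costs = new LinkedHashMap<>();
    costs.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
    costs.put("MoveCostFunction",            new double[] {7.0,   0.0});
    costs.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
    costs.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
    costs.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
    costs.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
    costs.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
    costs.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

    double weightedSum = 0.0, weightTotal = 0.0;
    for (double[] c : costs.values()) {
      weightedSum += c[0] * c[1]; // multiplier * imbalance
      weightTotal += c[0];
    }
    double weightedAverage = weightTotal == 0.0 ? 0.0 : weightedSum / weightTotal;

    double minCostNeedBalance = 1.0; // threshold(1.0) from the log
    System.out.printf("weighted average imbalance=%.1f <= threshold(%.1f) -> skip=%b%n",
        weightedAverage, minCostNeedBalance, weightedAverage <= minCostNeedBalance);
  }
}
```

With every imbalance at 0.0, the weighted average is 0.0, which is why each table's plan generation above ends in a skip.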
2024-11-13T18:38:00,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,405 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-13T18:38:00,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,406 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,406 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,406 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,406 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-13T18:38:00,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-13T18:38:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,407 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,407 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,407 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-13T18:38:00,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,407 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,407 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,407 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,407 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-13T18:38:00,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,418 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,418 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,419 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-13T18:38:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,419 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
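Note: the skip messages above name two tuning options for more aggressive balancing: lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0, or increase the multiplier of a specific cost function. The example below sets both programmatically on an HBase Configuration; the 0.05 threshold and the regionCountCost property/value are illustrative assumptions only (in a real deployment these would normally go into hbase-site.xml).

```java
// Hedged example of the tuning suggested by the log message above.
// The chosen values are illustrative, not recommendations.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Lower the "need balance" threshold so smaller imbalances trigger a plan
    // (property name taken from the log; 0.05f is an arbitrary example value).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Or raise the weight of a specific cost function. The property name below is an
    // assumed counterpart of the RegionCountSkewCostFunction multiplier (500.0 in the log).
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

    System.out.println("minCostNeedBalance = "
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
  }
}
```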
2024-11-13T18:38:00,419 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,419 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-13T18:38:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,420 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,420 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,420 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-13T18:38:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-13T18:38:00,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,421 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,421 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,421 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-13T18:38:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,421 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,421 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,421 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-13T18:38:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,422 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-13T18:38:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,423 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
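Note: the StochasticLoadBalancer(421) message repeated above names two tuning knobs: the balance threshold (hbase.master.balancer.stochastic.minCostNeedBalance, logged as "threshold(1.0)") and the per-cost-function multipliers shown in functionCost=. A minimal hedged sketch of how a test or deployment could lower that threshold follows; only minCostNeedBalance is named in this log, and the multiplier property key used below is an assumption for illustration, not taken from this output.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Hedged sketch: make the stochastic balancer more aggressive than the defaults seen in this log.
    public class BalancerTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Lower the minimum weighted-average imbalance (logged as "threshold(1.0)") that is
        // required before a balance plan is actually generated.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Assumed property name: raise the weight of RegionCountSkewCostFunction
        // (logged here with multiplier=500.0) relative to the other cost functions.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        System.out.println("minCostNeedBalance="
            + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
      }
    }

The same keys could equally be set in hbase-site.xml; the values above are examples only.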
2024-11-13T18:38:00,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,423 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-13T18:38:00,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,423 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,423 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,423 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-13T18:38:00,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-13T18:38:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,434 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,434 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-13T18:38:00,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,435 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,435 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,435 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-13T18:38:00,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,440 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,440 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,440 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-13T18:38:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,441 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
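Note: the recurring BaseLoadBalancer(253) DEBUG line "Slop is less than zero, not checking for sloppiness" indicates the per-server region-count slop check is disabled for this test run. A hedged sketch, assuming the standard hbase.regions.slop property governs that check (the value below is illustrative, not from this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Hedged sketch: re-enable the sloppiness check by giving slop a non-negative value.
    public class SlopConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setFloat("hbase.regions.slop", 0.2f); // illustrative 20% per-server tolerance
        System.out.println("hbase.regions.slop=" + conf.getFloat("hbase.regions.slop", -1f));
      }
    }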
2024-11-13T18:38:00,441 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,441 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-13T18:38:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,441 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,441 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,441 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,441 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-13T18:38:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-13T18:38:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,442 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,442 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,442 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-13T18:38:00,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,443 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,443 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,443 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,443 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-13T18:38:00,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,443 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,444 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,444 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-13T18:38:00,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,452 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,452 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,452 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-13T18:38:00,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,453 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,453 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,453 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,453 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-13T18:38:00,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-13T18:38:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,454 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,454 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,454 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-13T18:38:00,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,454 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,455 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,455 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-13T18:38:00,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,455 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,455 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-13T18:38:00,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,456 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,456 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,456 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-13T18:38:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,459 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,459 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,459 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,459 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-13T18:38:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-13T18:38:00,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,459 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,461 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,461 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-13T18:38:00,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,462 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,462 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,462 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,462 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:38:00,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,463 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,463 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,463 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:38:00,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,464 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,464 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,464 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:38:00,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,464 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,464 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,464 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,464 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:38:00,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-13T18:38:00,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,465 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,465 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,465 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:38:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,466 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,466 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,466 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,466 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:38:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,467 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,467 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,467 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-13T18:38:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,467 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,467 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,467 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-13T18:38:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,468 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,468 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,468 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,468 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-13T18:38:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-13T18:38:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,469 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,469 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,469 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-13T18:38:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,469 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,470 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,470 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,470 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-13T18:38:00,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,470 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,470 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,470 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-13T18:38:00,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,471 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
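[editor note] The StochasticLoadBalancer(421) entries above and below all report the same check: roughly, the balancer sums multiplier x imbalance over the cost functions it considers (the ones not marked "(not needed)"), divides by the sum of multipliers, and only generates moves when that weighted average exceeds hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run). The following is a minimal standalone sketch, not HBase code, that reproduces the arithmetic with the multipliers printed in the functionCost= listing; since every imbalance here is 0.0, the average is 0.0 <= 1.0 and each table is skipped.

// Minimal sketch (assumed reconstruction of the check the log messages describe,
// not the HBase implementation). Multipliers/imbalances are copied from the
// functionCost= listing above; "(not needed)" functions are omitted.
public class NeedsBalanceSketch {
  public static void main(String[] args) {
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances  = {  0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0, 0.0};
    double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance

    double weightedSum = 0.0;
    double multiplierSum = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weightedSum += multipliers[i] * imbalances[i];
      multiplierSum += multipliers[i];
    }
    double weightedAverageImbalance = weightedSum / multiplierSum; // 0.0 for this run

    // Matches the log: 0.0 <= threshold(1.0), so load balancing is skipped.
    System.out.printf("weighted average imbalance=%.1f, needs balance=%b%n",
        weightedAverageImbalance, weightedAverageImbalance > minCostNeedBalance);
  }
}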
2024-11-13T18:38:00,471 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,471 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-13T18:38:00,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,472 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,472 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,472 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,472 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-13T18:38:00,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-13T18:38:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,472 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,472 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,473 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-13T18:38:00,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,473 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,473 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,473 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,473 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-13T18:38:00,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,474 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,474 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,474 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-13T18:38:00,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,474 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,474 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,474 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-13T18:38:00,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,481 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,481 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,481 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,481 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-13T18:38:00,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-13T18:38:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,481 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,481 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,481 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-13T18:38:00,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,482 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,482 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,482 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,482 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-13T18:38:00,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,483 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,483 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,483 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-13T18:38:00,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,483 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,483 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,483 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-13T18:38:00,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,484 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,484 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,484 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,484 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-13T18:38:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-13T18:38:00,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,485 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,485 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,485 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-13T18:38:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv159594965=3, srv1737847369=4, srv1307120663=1, srv2093573450=6, srv1164190851=0, srv1862648209=5, srv639192476=9, srv1392526804=2, srv450138306=8, srv439345692=7} racks are {rack=0} 2024-11-13T18:38:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,485 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,485 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,485 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,486 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-13T18:38:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,487 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,487 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,487 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-13T18:38:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,488 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,488 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,488 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-13T18:38:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,495 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,495 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,495 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,495 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-13T18:38:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-13T18:38:00,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,496 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,496 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,496 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-13T18:38:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,497 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,497 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,497 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-13T18:38:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,498 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,498 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,498 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-13T18:38:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,508 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,508 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,508 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-13T18:38:00,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,509 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,509 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,509 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,509 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-13T18:38:00,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-13T18:38:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,510 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,510 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,510 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-13T18:38:00,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,511 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,511 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,511 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,511 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-13T18:38:00,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,512 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,512 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-13T18:38:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,516 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,516 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,516 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-13T18:38:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,516 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,516 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,516 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,516 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-13T18:38:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-13T18:38:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,517 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,517 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,517 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-13T18:38:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,517 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,518 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,518 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,518 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-13T18:38:00,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,522 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,522 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,522 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-13T18:38:00,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,523 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
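Note: the repeated "skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0)" records above point at the hbase.master.balancer.stochastic.minCostNeedBalance property named in the message itself. The following is only a minimal sketch of how that threshold could be lowered for a test or cluster configuration; the property name is taken from the log, while the use of HBaseConfiguration here and the value 0.05f are illustrative assumptions, not something this test run does.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerThresholdSketch {
  public static void main(String[] args) {
    // Hypothetical illustration: lower the minimum weighted imbalance that
    // triggers balancing. The property name comes from the log message above;
    // 0.05f is only an example value, not a recommendation from this run.
    Configuration conf = HBaseConfiguration.create();
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    System.out.println(
        conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}
```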
2024-11-13T18:38:00,523 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,523 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-13T18:38:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,523 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,523 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,523 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,523 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-13T18:38:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-13T18:38:00,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,524 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,524 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,524 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-13T18:38:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,525 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,525 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,525 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,525 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-13T18:38:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,526 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,526 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,526 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-13T18:38:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,526 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
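Note: each functionCost listing pairs a multiplier with a per-function imbalance (the "not needed" functions are excluded). The log does not show the exact formula behind "weighted average imbalance", so the sketch below assumes it is the multiplier-weighted average of the listed imbalances; with every imbalance at 0.0 the result is 0.0, which is consistent with the skip decision logged for each table.

```java
// A minimal sketch, not HBase source: it assumes "weighted average imbalance"
// means sum(multiplier_i * imbalance_i) / sum(multiplier_i) over the cost
// functions the log marks as needed (RegionCountSkew, Move, RackLocality,
// TableSkew, ReadRequest, WriteRequest, MemStoreSize, StoreFile).
public class WeightedImbalanceSketch {
  public static void main(String[] args) {
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances  = {0.0,   0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0};
    double weighted = 0.0;
    double totalWeight = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weighted += multipliers[i] * imbalances[i];
      totalWeight += multipliers[i];
    }
    double weightedAverage = totalWeight == 0.0 ? 0.0 : weighted / totalWeight;
    // Prints 0.0, matching "weighted average imbalance=0.0 <= threshold(1.0)".
    System.out.println("weighted average imbalance = " + weightedAverage);
  }
}
```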
2024-11-13T18:38:00,526 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,526 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-13T18:38:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,527 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,527 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,527 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,527 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-13T18:38:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-13T18:38:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,528 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,528 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,528 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-13T18:38:00,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,528 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,528 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,528 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,528 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-13T18:38:00,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,529 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,529 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,529 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-13T18:38:00,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,530 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
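Note: the "Hosts are {srv...=N} racks are {rack=0}" lines followed by "server N is on host N" and "server N is on rack 0" describe an index mapping for a ten-server, single-rack test cluster where each server is its own host. The snippet below is purely illustrative of that mapping shape and is not the BalancerClusterState implementation; the server names are copied from the log.

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative only: reproduces the index assignment the log prints for a
// single-rack cluster with one server per host.
public class ClusterIndexSketch {
  public static void main(String[] args) {
    String[] servers = {"srv1001518497", "srv1079960084", "srv1096377277",
        "srv1192772059", "srv1200603842", "srv1563575646", "srv2008876709",
        "srv602371552", "srv729174792", "srv825641903"};
    Map<String, Integer> serverToHost = new LinkedHashMap<>();
    Map<String, Integer> serverToRack = new LinkedHashMap<>();
    for (int i = 0; i < servers.length; i++) {
      serverToHost.put(servers[i], i); // each server maps to its own host index
      serverToRack.put(servers[i], 0); // all ten hosts sit on the single rack 0
    }
    System.out.println("hosts=" + serverToHost);
    System.out.println("racks=" + serverToRack);
  }
}
```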
2024-11-13T18:38:00,530 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,530 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-13T18:38:00,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,530 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,530 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,531 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,531 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-13T18:38:00,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-13T18:38:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,531 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,531 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,531 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-13T18:38:00,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,532 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,532 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,532 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,532 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-13T18:38:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,533 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,533 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,533 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:38:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,533 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
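Editorial note: every "skipping load balancing" entry above turns on the same arithmetic, a weighted average of the per-cost-function imbalances compared against the minCostNeedBalance threshold (1.0 in this run). The short Java sketch below only illustrates that arithmetic from the multiplier/imbalance pairs printed after functionCost=; the class, record, and method names are invented for the example and are not HBase APIs.

```java
import java.util.List;

// Illustrative sketch only: recomputes the "weighted average imbalance" reported in the
// log lines above from the (multiplier, imbalance) pairs printed after functionCost=.
// CostTerm, weightedAverageImbalance and needsBalancing are made-up names, not HBase code.
public class WeightedImbalanceSketch {

  record CostTerm(String name, double multiplier, double imbalance) {}

  // weighted average = sum(multiplier_i * imbalance_i) / sum(multiplier_i)
  static double weightedAverageImbalance(List<CostTerm> terms) {
    double weightedSum = 0.0;
    double multiplierSum = 0.0;
    for (CostTerm t : terms) {
      weightedSum += t.multiplier() * t.imbalance();
      multiplierSum += t.multiplier();
    }
    return multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;
  }

  // Balancing proceeds only when the weighted average exceeds the threshold
  // (hbase.master.balancer.stochastic.minCostNeedBalance, 1.0 in this test run).
  static boolean needsBalancing(List<CostTerm> terms, double minCostNeedBalance) {
    return weightedAverageImbalance(terms) > minCostNeedBalance;
  }

  public static void main(String[] args) {
    List<CostTerm> terms = List.of(
        new CostTerm("RegionCountSkewCostFunction", 500.0, 0.0),
        new CostTerm("MoveCostFunction", 7.0, 0.0),
        new CostTerm("RackLocalityCostFunction", 15.0, 0.0),
        new CostTerm("TableSkewCostFunction", 35.0, 0.0),
        new CostTerm("ReadRequestCostFunction", 5.0, 0.0),
        new CostTerm("WriteRequestCostFunction", 5.0, 0.0),
        new CostTerm("MemStoreSizeCostFunction", 5.0, 0.0),
        new CostTerm("StoreFileCostFunction", 5.0, 0.0));
    // With every imbalance at 0.0 the weighted average is 0.0 <= 1.0, so the balancer
    // skips the table, which is exactly what each log entry above reports.
    System.out.println(needsBalancing(terms, 1.0)); // false
  }
}
```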
2024-11-13T18:38:00,534 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,534 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:38:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,534 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,534 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,534 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,534 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:38:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-13T18:38:00,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,535 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,535 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,535 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:38:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,535 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,535 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,535 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,535 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:38:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
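Editorial note: the BalancerClusterState entries ("Hosts are {...} racks are {rack=0}", "server N is on host N", "server N is on rack 0", "Number of tables=1, number of hosts=10, number of racks=1") describe a flat topology where each of the ten servers is its own host and all hosts share one rack. The sketch below, with invented names and not the HBase BalancerClusterState code, shows the kind of index mapping those lines correspond to.

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative sketch only: rebuilds the server -> host -> rack index mapping that the
// BalancerClusterState lines above print for this test cluster (10 hosts, 1 rack).
// Class and variable names are invented for the example.
public class ClusterIndexSketch {

  public static void main(String[] args) {
    // Ordered so that array index == host index from the log ({srv1001518497=0, ..., srv825641903=9}).
    String[] servers = {
        "srv1001518497", "srv1079960084", "srv1096377277", "srv1192772059", "srv1200603842",
        "srv1563575646", "srv2008876709", "srv602371552", "srv729174792", "srv825641903"};

    Map<String, Integer> hostIndex = new LinkedHashMap<>();
    Map<String, Integer> rackIndex = new LinkedHashMap<>();
    rackIndex.put("rack", 0); // the test topology has a single rack, named "rack"

    int[] serverToHost = new int[servers.length];
    int[] serverToRack = new int[servers.length];
    for (int server = 0; server < servers.length; server++) {
      hostIndex.put(servers[server], server); // each server is its own host in this test
      serverToHost[server] = server;
      serverToRack[server] = 0;               // every host lives in the single rack 0
      System.out.printf("server %d is on host %d, rack %d%n",
          server, serverToHost[server], serverToRack[server]);
    }
    System.out.printf("Number of hosts=%d, number of racks=%d%n",
        hostIndex.size(), rackIndex.size());
  }
}
```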
2024-11-13T18:38:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,536 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,536 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,536 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:38:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,537 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
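Editorial note: each skip message also names the two tuning knobs for more aggressive balancing, lowering hbase.master.balancer.stochastic.minCostNeedBalance (1.0 here) or raising the multiplier of a specific cost function. The sketch below shows where such properties would be set programmatically; the minCostNeedBalance key is quoted from the log itself, while the multiplier key shown is an assumed example and should be checked against the HBase version in use.

```java
import org.apache.hadoop.conf.Configuration;

// Sketch only: where the tuning knobs mentioned in the skip messages would be set.
// "hbase.master.balancer.stochastic.minCostNeedBalance" is the property named in the log;
// the regionCountCost key below is an assumed example key, not verified for this HBase version.
public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Lower the threshold so smaller weighted-average imbalances trigger a balance run
    // (this test run used 1.0, which skips every table with imbalance 0.0).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Alternatively, weight a specific cost function more heavily (assumed key name):
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

    System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}
```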
2024-11-13T18:38:00,537 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,537 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-13T18:38:00,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,538 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,538 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,538 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,538 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-13T18:38:00,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-13T18:38:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,539 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,539 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,539 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-13T18:38:00,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,539 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,540 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,540 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,540 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-13T18:38:00,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,540 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,540 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,540 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-13T18:38:00,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,541 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
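Editorial note: the recurring "Slop is less than zero, not checking for sloppiness" entry refers to the region-count slop pre-check (configured via hbase.regions.slop); with a negative slop, as in this test, the check is bypassed. Below is a simplified sketch with invented names, under the assumption that "sloppy" means a server's region count falls outside the average plus or minus the slop fraction; it is not the BaseLoadBalancer implementation.

```java
// Simplified sketch (invented names) of a slop-based sloppiness pre-check: with slop < 0,
// as in the test log above, the check is skipped entirely; otherwise a server whose region
// count falls outside average*(1 +/- slop) would flag the cluster.
public class SlopCheckSketch {

  static boolean sloppyServerExists(int[] regionsPerServer, double slop) {
    if (slop < 0) {
      return false; // "Slop is less than zero, not checking for sloppiness."
    }
    double total = 0;
    for (int r : regionsPerServer) {
      total += r;
    }
    double average = total / regionsPerServer.length;
    double floor = Math.floor(average * (1 - slop));
    double ceiling = Math.ceil(average * (1 + slop));
    for (int r : regionsPerServer) {
      if (r < floor || r > ceiling) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    int[] regions = {12, 12, 11, 13, 12, 12, 12, 11, 20, 12}; // 10 servers, as in the test
    System.out.println(sloppyServerExists(regions, -1.0)); // false: slop < 0, check skipped
    System.out.println(sloppyServerExists(regions, 0.01)); // true: 11 and 20 fall outside the band
  }
}
```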
2024-11-13T18:38:00,541 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,541 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-13T18:38:00,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,542 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,542 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,542 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,542 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-13T18:38:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-13T18:38:00,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,542 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,542 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,543 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-13T18:38:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,543 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,543 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,543 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,543 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-13T18:38:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,544 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,544 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,544 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-13T18:38:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,544 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
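Note on the records above: each "skipping load balancing" entry lists, per cost function, a multiplier and an imbalance, and compares a weighted average imbalance against the minCostNeedBalance threshold (1.0 in this run). The sketch below recomputes that figure under the assumption that it is simply the multiplier-weighted mean of the per-function imbalances; the class and field names are illustrative and not the actual HBase internals.

    import java.util.LinkedHashMap;
    import java.util.Map;

    /** Illustrative only: recomputes the weighted average imbalance reported in the log. */
    public class WeightedImbalanceSketch {
        public static void main(String[] args) {
            // multiplier -> imbalance pairs copied from the functionCost entry above
            Map<String, double[]> costs = new LinkedHashMap<>();
            costs.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
            costs.put("MoveCostFunction",            new double[] {7.0,   0.0});
            costs.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
            costs.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
            costs.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
            costs.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
            costs.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
            costs.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

            double weightedSum = 0.0;
            double weightTotal = 0.0;
            for (double[] c : costs.values()) {
                weightedSum += c[0] * c[1];   // multiplier * imbalance
                weightTotal += c[0];
            }
            double weightedAverageImbalance = weightedSum / weightTotal;
            double minCostNeedBalance = 1.0;  // threshold named in the log message

            // Every imbalance is 0.0, so the average is 0.0 <= 1.0 and the table is skipped.
            System.out.printf("weighted average imbalance = %.3f, needs balancing: %b%n",
                weightedAverageImbalance, weightedAverageImbalance > minCostNeedBalance);
        }
    }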
2024-11-13T18:38:00,545 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,545 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-13T18:38:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,545 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,545 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,545 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,545 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-13T18:38:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-13T18:38:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,546 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,546 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,546 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-13T18:38:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,546 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,546 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,546 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,546 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-13T18:38:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,547 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,547 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,547 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-13T18:38:00,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,548 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
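The skip message names two knobs for more aggressive balancing: lower hbase.master.balancer.stochastic.minCostNeedBalance or raise the multiplier of a specific cost function. A minimal sketch of changing the threshold programmatically with the Hadoop Configuration API follows; the value 0.05 is only an example, and in a real deployment the property would normally be set in hbase-site.xml on the master instead.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    /** Sketch: lower the balancer threshold named in the log message (example value only). */
    public class BalancerThresholdSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Property name taken verbatim from the log; 0.05 is an arbitrary example value.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
            System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
        }
    }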
2024-11-13T18:38:00,548 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,549 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-13T18:38:00,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,549 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,549 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,549 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,549 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-13T18:38:00,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-13T18:38:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,550 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,550 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,550 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-13T18:38:00,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,551 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,551 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,551 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,551 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-13T18:38:00,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1001518497=0, srv1096377277=2, srv2008876709=6, srv1563575646=5, srv729174792=8, srv602371552=7, srv1200603842=4, srv825641903=9, srv1079960084=1, srv1192772059=3} racks are {rack=0} 2024-11-13T18:38:00,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,552 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,552 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,552 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-13T18:38:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv404122974=4, srv66334852=6, srv881140814=8, srv103117967=0, srv1797712976=1, srv33373351=3, srv1804288050=2, srv452524665=5, srv786598981=7, srv933775564=9} racks are {rack=0} 2024-11-13T18:38:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,553 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
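Each "Start Generate Balance plan for table" cycle begins by rebuilding the cluster topology: the BalancerClusterState records assign every server an index, then map it to a host index and a rack index; with ten distinct hosts and a single rack in this test, server N always lands on host N and rack 0. The sketch below mirrors that indexing; it is illustrative only, not the actual BalancerClusterState code, and the three server names are just a subset copied from the log.

    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    /** Illustrative server -> host/rack indexing, mirroring the BalancerClusterState log lines. */
    public class ClusterIndexSketch {
        public static void main(String[] args) {
            List<String> servers = List.of("srv404122974", "srv66334852", "srv881140814");
            Map<String, Integer> hostIndex = new LinkedHashMap<>();
            Map<String, Integer> rackIndex = new LinkedHashMap<>();

            for (int i = 0; i < servers.size(); i++) {
                String host = servers.get(i);   // in this test every server is its own host
                String rack = "rack";           // the test topology has a single rack
                hostIndex.putIfAbsent(host, hostIndex.size());
                rackIndex.putIfAbsent(rack, rackIndex.size());
                System.out.printf("server %d is on host %d%n", i, hostIndex.get(host));
                System.out.printf("server %d is on rack %d%n", i, rackIndex.get(rack));
            }
        }
    }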
2024-11-13T18:38:00,553 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,553 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-13T18:38:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv404122974=4, srv66334852=6, srv881140814=8, srv103117967=0, srv1797712976=1, srv33373351=3, srv1804288050=2, srv452524665=5, srv786598981=7, srv933775564=9} racks are {rack=0} 2024-11-13T18:38:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,553 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,553 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,553 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,554 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-13T18:38:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv404122974=4, srv66334852=6, srv881140814=8, srv103117967=0, srv1797712976=1, srv33373351=3, srv1804288050=2, srv452524665=5, srv786598981=7, srv933775564=9} racks are {rack=0} 2024-11-13T18:38:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,554 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,554 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,554 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:38:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv404122974=4, srv66334852=6, srv881140814=8, srv103117967=0, srv1797712976=1, srv33373351=3, srv1804288050=2, srv452524665=5, srv786598981=7, srv933775564=9} racks are {rack=0} 2024-11-13T18:38:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,554 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,555 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,555 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,555 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:38:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv404122974=4, srv66334852=6, srv881140814=8, srv103117967=0, srv1797712976=1, srv33373351=3, srv1804288050=2, srv452524665=5, srv786598981=7, srv933775564=9} racks are {rack=0} 2024-11-13T18:38:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,555 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,555 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,555 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:38:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv404122974=4, srv66334852=6, srv881140814=8, srv103117967=0, srv1797712976=1, srv33373351=3, srv1804288050=2, srv452524665=5, srv786598981=7, srv933775564=9} racks are {rack=0} 2024-11-13T18:38:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,556 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
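
[Editor's note] The "skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0)" records above describe a simple check: a weighted average of the per-cost-function imbalances is compared against hbase.master.balancer.stochastic.minCostNeedBalance. The sketch below is illustrative only (class and variable names are invented, this is not the StochasticLoadBalancer source); it just reproduces that arithmetic with the multipliers and imbalances printed in the functionCost= lines of this run.

```java
// Illustrative sketch (not HBase source): the "weighted average imbalance <= threshold"
// decision the balancer logs per table, using the values printed above.
public class WeightedImbalanceSketch {
    public static void main(String[] args) {
        // Multipliers and imbalances as printed in the functionCost= line:
        // RegionCountSkew=500, Move=7, RackLocality=15, TableSkew=35,
        // Read/Write/MemStore/StoreFile=5 each; all imbalances are 0.0 in this run.
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {  0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0, 0.0};
        double minCostNeedBalance = 1.0; // threshold(1.0) in the log

        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            weightedSum += multipliers[i] * imbalances[i];
            multiplierSum += multipliers[i];
        }
        double weightedAverageImbalance = weightedSum / multiplierSum; // 0.0 here

        if (weightedAverageImbalance <= minCostNeedBalance) {
            System.out.println("skipping load balancing: imbalance "
                + weightedAverageImbalance + " <= threshold " + minCostNeedBalance);
        } else {
            System.out.println("generating a balance plan");
        }
    }
}
```

With every imbalance at 0.0 the weighted average is 0.0, so each table in this test falls into the "skipping" branch, which is exactly what the repeated records above show.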
2024-11-13T18:38:00,556 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,556 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:38:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv404122974=4, srv66334852=6, srv881140814=8, srv103117967=0, srv1797712976=1, srv33373351=3, srv1804288050=2, srv452524665=5, srv786598981=7, srv933775564=9} racks are {rack=0} 2024-11-13T18:38:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,556 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,557 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,557 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,557 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:38:00,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv404122974=4, srv66334852=6, srv881140814=8, srv103117967=0, srv1797712976=1, srv33373351=3, srv1804288050=2, srv452524665=5, srv786598981=7, srv933775564=9} racks are {rack=0} 2024-11-13T18:38:00,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 
0 2024-11-13T18:38:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,557 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,557 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,557 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:38:00,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv404122974=4, srv66334852=6, srv881140814=8, srv103117967=0, srv1797712976=1, srv33373351=3, srv1804288050=2, srv452524665=5, srv786598981=7, srv933775564=9} racks are {rack=0} 2024-11-13T18:38:00,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,558 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,558 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,558 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,558 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-13T18:38:00,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv404122974=4, srv66334852=6, srv881140814=8, srv103117967=0, srv1797712976=1, srv33373351=3, srv1804288050=2, srv452524665=5, srv786598981=7, srv933775564=9} racks are {rack=0} 2024-11-13T18:38:00,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,559 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,559 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,559 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-13T18:38:00,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv404122974=4, srv66334852=6, srv881140814=8, srv103117967=0, srv1797712976=1, srv33373351=3, srv1804288050=2, srv452524665=5, srv786598981=7, srv933775564=9} racks are {rack=0} 2024-11-13T18:38:00,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,559 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
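
[Editor's note] The same log message names two knobs for more aggressive balancing: lower hbase.master.balancer.stochastic.minCostNeedBalance, or raise the multiplier of a specific cost function. A minimal sketch of both changes follows; in a real deployment these properties are normally set in hbase-site.xml on the HMaster rather than programmatically, the value 0.05f is an arbitrary example, and the regionCountCost key is assumed to be the one read by RegionCountSkewCostFunction (its default of 500 matches the multiplier printed above) — verify it against your HBase version.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: shows the configuration keys the log message refers to.
public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Remedy 1 from the log: lower the threshold the weighted average
        // imbalance is compared against (1.0 in this test run).
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Remedy 2 from the log: raise the relative weight of one cost function.
        // Assumed key for RegionCountSkewCostFunction (default 500 matches the log).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
        System.out.println(conf.get("hbase.master.balancer.stochastic.regionCountCost"));
    }
}
```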
2024-11-13T18:38:00,559 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,559 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-13T18:38:00,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv404122974=4, srv66334852=6, srv881140814=8, srv103117967=0, srv1797712976=1, srv33373351=3, srv1804288050=2, srv452524665=5, srv786598981=7, srv933775564=9} racks are {rack=0} 2024-11-13T18:38:00,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,560 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,561 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,561 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,561 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-13T18:38:00,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv404122974=4, srv66334852=6, srv881140814=8, srv103117967=0, srv1797712976=1, srv33373351=3, srv1804288050=2, srv452524665=5, srv786598981=7, srv933775564=9} racks are {rack=0} 2024-11-13T18:38:00,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,561 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,561 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,561 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-13T18:38:00,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv404122974=4, srv66334852=6, srv881140814=8, srv103117967=0, srv1797712976=1, srv33373351=3, srv1804288050=2, srv452524665=5, srv786598981=7, srv933775564=9} racks are {rack=0} 2024-11-13T18:38:00,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,562 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,562 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,562 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,562 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-13T18:38:00,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv404122974=4, srv66334852=6, srv881140814=8, srv103117967=0, srv1797712976=1, srv33373351=3, srv1804288050=2, srv452524665=5, srv786598981=7, srv933775564=9} racks are {rack=0} 2024-11-13T18:38:00,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,562 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,562 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,562 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-13T18:38:00,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv404122974=4, srv66334852=6, srv881140814=8, srv103117967=0, srv1797712976=1, srv33373351=3, srv1804288050=2, srv452524665=5, srv786598981=7, srv933775564=9} racks are {rack=0} 2024-11-13T18:38:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,563 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
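
[Editor's note] The recurring "Hosts are {...} racks are {rack=0}", "server N is on host N", and "server N is on rack 0" records are the balancer's bookkeeping of the test topology: ten servers, one host per server, one rack. The sketch below is purely illustrative of that name-to-index mapping (it is not the BalancerClusterState implementation); the single-rack layout is consistent with the zero imbalance reported for the rack locality cost function and the replica cost functions being marked "not needed".

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative sketch of the topology the log prints: each server maps to its
// own host index, and every server sits on the single rack "rack" (index 0).
public class ClusterStateSketch {
    public static void main(String[] args) {
        String[] servers = {
            "srv103117967", "srv1797712976", "srv1804288050", "srv33373351",
            "srv404122974", "srv452524665", "srv66334852", "srv786598981",
            "srv881140814", "srv933775564"
        };
        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        for (String server : servers) {
            hostIndex.put(server, hostIndex.size()); // one host per server in this test
        }
        int rack = 0; // "racks are {rack=0}"
        for (Map.Entry<String, Integer> entry : hostIndex.entrySet()) {
            System.out.println("server " + entry.getValue() + " (" + entry.getKey()
                + ") is on host " + entry.getValue() + " and rack " + rack);
        }
    }
}
```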
2024-11-13T18:38:00,563 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,564 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:38:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv286749621=4, srv1185463441=0, srv1488151523=3, srv1202258717=1, srv50072796=6, srv614206892=7, srv1254344375=2, srv828738042=9, srv364219515=5, srv674567775=8} racks are {rack=0} 2024-11-13T18:38:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,565 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,565 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,565 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,565 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:38:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv286749621=4, srv1185463441=0, srv1488151523=3, srv1202258717=1, srv50072796=6, srv614206892=7, srv1254344375=2, srv828738042=9, srv364219515=5, srv674567775=8} racks are {rack=0} 2024-11-13T18:38:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,566 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,566 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,566 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:38:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv286749621=4, srv1185463441=0, srv1488151523=3, srv1202258717=1, srv50072796=6, srv614206892=7, srv1254344375=2, srv828738042=9, srv364219515=5, srv674567775=8} racks are {rack=0} 2024-11-13T18:38:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,566 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,566 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,566 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,566 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:38:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv286749621=4, srv1185463441=0, srv1488151523=3, srv1202258717=1, srv50072796=6, srv614206892=7, srv1254344375=2, srv828738042=9, srv364219515=5, srv674567775=8} racks are {rack=0} 2024-11-13T18:38:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
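Note on the BalancerClusterState records above: for each mock cluster the balancer assigns every server a dense host index and rack index (ten single-server hosts on one rack here, hence "server N is on host N" and "server N is on rack 0" for all of them). The sketch below is purely illustrative and is not the actual BalancerClusterState code; the class and method names (TopologyIndex, hostOf, rackOf) are invented for this example.

import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative only: dense integer indexing of hosts and racks, mirroring the
// "server N is on host N" / "server N is on rack 0" records above.
public class TopologyIndex {
  private final Map<String, Integer> hostIndex = new LinkedHashMap<>();
  private final Map<String, Integer> rackIndex = new LinkedHashMap<>();

  int hostOf(String hostname) {
    return hostIndex.computeIfAbsent(hostname, h -> hostIndex.size());
  }

  int rackOf(String rack) {
    return rackIndex.computeIfAbsent(rack, r -> rackIndex.size());
  }

  public static void main(String[] args) {
    TopologyIndex idx = new TopologyIndex();
    // three of the mock servers from the "Hosts are {...}" record; all share the single rack "rack"
    for (String s : new String[] {"srv1185463441", "srv1202258717", "srv1254344375"}) {
      System.out.println(s + " -> host " + idx.hostOf(s) + ", rack " + idx.rackOf("rack"));
    }
  }
}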
2024-11-13T18:38:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,567 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,567 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,567 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:38:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv286749621=4, srv1185463441=0, srv1488151523=3, srv1202258717=1, srv50072796=6, srv614206892=7, srv1254344375=2, srv828738042=9, srv364219515=5, srv674567775=8} racks are {rack=0} 2024-11-13T18:38:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,568 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
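Each "skipping load balancing" record above lists the active cost functions with a multiplier and a per-function imbalance, and compares a multiplier-weighted average of those imbalances against hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run). The arithmetic of that comparison is sketched below using the numbers from the log; this illustrates the reported quantities only, it is not the StochasticLoadBalancer source, and the class name NeedsBalanceCheck is invented.

// Illustrative arithmetic only: weighted average imbalance vs. minCostNeedBalance.
public class NeedsBalanceCheck {
  public static void main(String[] args) {
    // multipliers/imbalances as reported for RegionCountSkew, Move, RackLocality, TableSkew,
    // ReadRequest, WriteRequest, MemStoreSize and StoreFile cost functions in the records above
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances  = {  0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0, 0.0};
    double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance

    double weighted = 0.0, totalWeight = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weighted += multipliers[i] * imbalances[i];
      totalWeight += multipliers[i];
    }
    double weightedAverageImbalance = totalWeight == 0.0 ? 0.0 : weighted / totalWeight;

    // 0.0 <= 1.0, so plan generation is skipped for this table, as logged above
    System.out.println("weighted average imbalance = " + weightedAverageImbalance
        + ", needs balance = " + (weightedAverageImbalance > minCostNeedBalance));
  }
}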
2024-11-13T18:38:00,568 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,568 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:38:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv286749621=4, srv1185463441=0, srv1488151523=3, srv1202258717=1, srv50072796=6, srv614206892=7, srv1254344375=2, srv828738042=9, srv364219515=5, srv674567775=8} racks are {rack=0} 2024-11-13T18:38:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,569 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,569 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,569 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,569 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-13T18:38:00,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv286749621=4, srv1185463441=0, srv1488151523=3, srv1202258717=1, srv50072796=6, srv614206892=7, srv1254344375=2, srv828738042=9, srv364219515=5, srv674567775=8} racks are {rack=0} 2024-11-13T18:38:00,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,569 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,569 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,569 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-13T18:38:00,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv286749621=4, srv1185463441=0, srv1488151523=3, srv1202258717=1, srv50072796=6, srv614206892=7, srv1254344375=2, srv828738042=9, srv364219515=5, srv674567775=8} racks are {rack=0} 2024-11-13T18:38:00,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,570 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,570 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,570 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,573 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:38:00,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1476732128=1, srv1799530844=3, srv1694778115=2, srv439548182=5, srv503217295=6, srv994782513=9, srv936525194=8, srv1078395410=0, srv1845251789=4, srv800918663=7} racks are {rack=0} 2024-11-13T18:38:00,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,573 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,573 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,573 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:38:00,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1476732128=1, srv1799530844=3, srv1694778115=2, srv439548182=5, srv503217295=6, srv994782513=9, srv936525194=8, srv1078395410=0, srv1845251789=4, srv800918663=7} racks are {rack=0} 2024-11-13T18:38:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,574 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
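The skip message above also names the two tuning knobs for more aggressive balancing: lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising the multiplier of a specific cost function. A minimal sketch of setting the first knob on a Hadoop Configuration follows; only the property named in the log is used, the value 0.05f is an arbitrary example, and the class name BalancerTuning is invented.

import org.apache.hadoop.conf.Configuration;

// Illustrative sketch: lower minCostNeedBalance so smaller imbalances trigger a balance plan.
public class BalancerTuning {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Default in this test run is 1.0; the per-cost-function multiplier properties are omitted here.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}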
2024-11-13T18:38:00,574 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,574 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:38:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1476732128=1, srv1799530844=3, srv1694778115=2, srv439548182=5, srv503217295=6, srv994782513=9, srv936525194=8, srv1078395410=0, srv1845251789=4, srv800918663=7} racks are {rack=0} 2024-11-13T18:38:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,575 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,575 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,575 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,575 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:38:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1476732128=1, srv1799530844=3, srv1694778115=2, srv439548182=5, srv503217295=6, srv994782513=9, srv936525194=8, srv1078395410=0, srv1845251789=4, srv800918663=7} racks are {rack=0} 2024-11-13T18:38:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,575 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,575 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,575 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:38:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1476732128=1, srv1799530844=3, srv1694778115=2, srv439548182=5, srv503217295=6, srv994782513=9, srv936525194=8, srv1078395410=0, srv1845251789=4, srv800918663=7} racks are {rack=0} 2024-11-13T18:38:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,592 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,592 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,592 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,592 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:38:00,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1476732128=1, srv1799530844=3, srv1694778115=2, srv439548182=5, srv503217295=6, srv994782513=9, srv936525194=8, srv1078395410=0, srv1845251789=4, srv800918663=7} racks are {rack=0} 2024-11-13T18:38:00,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,594 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,594 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,594 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-13T18:38:00,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1476732128=1, srv1799530844=3, srv1694778115=2, srv439548182=5, srv503217295=6, srv994782513=9, srv936525194=8, srv1078395410=0, srv1845251789=4, srv800918663=7} racks are {rack=0} 2024-11-13T18:38:00,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,594 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
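The recurring "Slop is less than zero, not checking for sloppiness" records refer to the slop-based shortcut in BaseLoadBalancer: with a non-negative slop, a server whose region count stays within roughly average*(1 ± slop) can be treated as already balanced. The sketch below only illustrates that bounds check under the stated assumption about the formula; it is not the BaseLoadBalancer code, and the class name SlopCheck is invented. A negative slop, as in this test, simply disables the check.

// Illustrative sketch (assumed formula): region-count bounds derived from a slop fraction.
public class SlopCheck {
  public static void main(String[] args) {
    double averageRegionsPerServer = 12.0; // hypothetical average load
    double slop = 0.2;                     // hypothetical slop fraction

    if (slop < 0) {
      System.out.println("Slop is less than zero, not checking for sloppiness.");
      return;
    }
    long floor = (long) Math.floor(averageRegionsPerServer * (1 - slop));
    long ceiling = (long) Math.ceil(averageRegionsPerServer * (1 + slop));
    System.out.println("balanced if region count stays within [" + floor + ", " + ceiling + "]");
  }
}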
2024-11-13T18:38:00,594 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,594 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-13T18:38:00,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1476732128=1, srv1799530844=3, srv1694778115=2, srv439548182=5, srv503217295=6, srv994782513=9, srv936525194=8, srv1078395410=0, srv1845251789=4, srv800918663=7} racks are {rack=0} 2024-11-13T18:38:00,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,595 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,595 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,595 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,595 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-13T18:38:00,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1476732128=1, srv1799530844=3, srv1694778115=2, srv439548182=5, srv503217295=6, srv994782513=9, srv936525194=8, srv1078395410=0, srv1845251789=4, srv800918663=7} racks are {rack=0} 2024-11-13T18:38:00,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,604 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,604 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,605 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:38:00,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1611807869=2, srv1799513819=3, srv277866168=6, srv1424510652=1, srv83232356=8, srv1376411948=0, srv560477403=7, srv904370903=9, srv1840984404=4, srv2099428081=5} racks are {rack=0} 2024-11-13T18:38:00,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,605 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,605 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,605 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,605 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:38:00,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1611807869=2, srv1799513819=3, srv277866168=6, srv1424510652=1, srv83232356=8, srv1376411948=0, srv560477403=7, srv904370903=9, srv1840984404=4, srv2099428081=5} racks are {rack=0} 2024-11-13T18:38:00,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,606 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,606 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,606 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:38:00,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1611807869=2, srv1799513819=3, srv277866168=6, srv1424510652=1, srv83232356=8, srv1376411948=0, srv560477403=7, srv904370903=9, srv1840984404=4, srv2099428081=5} racks are {rack=0} 2024-11-13T18:38:00,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,607 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,607 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,607 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:38:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1611807869=2, srv1799513819=3, srv277866168=6, srv1424510652=1, srv83232356=8, srv1376411948=0, srv560477403=7, srv904370903=9, srv1840984404=4, srv2099428081=5} racks are {rack=0} 2024-11-13T18:38:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,607 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,607 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,607 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,607 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:38:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1611807869=2, srv1799513819=3, srv277866168=6, srv1424510652=1, srv83232356=8, srv1376411948=0, srv560477403=7, srv904370903=9, srv1840984404=4, srv2099428081=5} racks are {rack=0} 2024-11-13T18:38:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,613 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,613 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,614 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:38:00,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1611807869=2, srv1799513819=3, srv277866168=6, srv1424510652=1, srv83232356=8, srv1376411948=0, srv560477403=7, srv904370903=9, srv1840984404=4, srv2099428081=5} racks are {rack=0} 2024-11-13T18:38:00,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,614 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,614 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,614 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,615 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-13T18:38:00,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1611807869=2, srv1799513819=3, srv277866168=6, srv1424510652=1, srv83232356=8, srv1376411948=0, srv560477403=7, srv904370903=9, srv1840984404=4, srv2099428081=5} racks are {rack=0} 2024-11-13T18:38:00,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,615 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,615 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,615 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-13T18:38:00,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1611807869=2, srv1799513819=3, srv277866168=6, srv1424510652=1, srv83232356=8, srv1376411948=0, srv560477403=7, srv904370903=9, srv1840984404=4, srv2099428081=5} racks are {rack=0} 2024-11-13T18:38:00,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,616 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,616 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,616 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-13T18:38:00,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1611807869=2, srv1799513819=3, srv277866168=6, srv1424510652=1, srv83232356=8, srv1376411948=0, srv560477403=7, srv904370903=9, srv1840984404=4, srv2099428081=5} racks are {rack=0} 2024-11-13T18:38:00,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,616 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,617 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,617 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,617 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-13T18:38:00,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1611807869=2, srv1799513819=3, srv277866168=6, srv1424510652=1, srv83232356=8, srv1376411948=0, srv560477403=7, srv904370903=9, srv1840984404=4, srv2099428081=5} racks are {rack=0} 2024-11-13T18:38:00,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,617 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,617 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,621 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-13T18:38:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,622 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,628 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,628 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,628 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-13T18:38:00,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,628 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,628 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,628 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-13T18:38:00,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,629 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-13T18:38:00,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,630 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,630 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-13T18:38:00,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-13T18:38:00,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,630 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-13T18:38:00,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,631 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,631 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,631 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,631 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-13T18:38:00,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,632 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,632 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,632 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-13T18:38:00,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,632 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,632 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,632 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-13T18:38:00,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,633 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,633 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-13T18:38:00,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,633 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-13T18:38:00,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,634 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,634 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,634 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,634 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-13T18:38:00,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,635 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,635 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,635 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-13T18:38:00,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,641 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,641 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,641 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-13T18:38:00,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,642 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,642 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-13T18:38:00,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,643 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,643 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-13T18:38:00,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,643 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,643 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-13T18:38:00,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,644 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,644 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-13T18:38:00,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,645 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-13T18:38:00,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,645 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,646 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,646 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-13T18:38:00,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,646 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,646 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,646 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-13T18:38:00,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,647 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,647 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,647 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,647 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-13T18:38:00,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,647 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,648 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,648 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-13T18:38:00,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,649 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
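
[Editor's note] The recurring StochasticLoadBalancer message above names the two tuning knobs directly: lower hbase.master.balancer.stochastic.minCostNeedBalance (pinned at 1.0 in this test run) or raise the multiplier of a specific cost function. Below is a minimal sketch of how that could be expressed against an HBase Configuration; the property hbase.master.balancer.stochastic.minCostNeedBalance is taken from the log message itself, while the multiplier property name hbase.master.balancer.stochastic.regionCountCost is an assumption and the concrete values are purely illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Named in the log message: lower the threshold so a smaller weighted
    // average imbalance is enough to trigger a balance plan (1.0 in this run).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Assumed property name for the RegionCountSkewCostFunction multiplier
    // (the log reports its current weight as 500.0); raising it makes region
    // count skew dominate the weighted average.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

    System.out.println(conf.getFloat(
        "hbase.master.balancer.stochastic.minCostNeedBalance", -1f));
  }
}
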
2024-11-13T18:38:00,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-13T18:38:00,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,649 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,649 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-13T18:38:00,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-13T18:38:00,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,650 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-13T18:38:00,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,650 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-13T18:38:00,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,651 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-13T18:38:00,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,652 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
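
[Editor's note] For context on the "weighted average imbalance=0.0 <= threshold(1.0)" comparison repeated for every table: the figure reads as a multiplier-weighted mean of the per-function imbalances, with the "(not needed)" functions left out. The sketch below assumes exactly that formula, which is an inference from the message wording, not something the log confirms; with every imbalance at 0.0 the result is trivially 0.0, hence the skip.

// Hedged sketch: assumes weighted average = sum(multiplier_i * imbalance_i) / sum(multiplier_i)
// over the cost functions the log reports with a multiplier.
public class WeightedImbalanceSketch {
  public static void main(String[] args) {
    // Multipliers as listed in the log: region count skew, move, rack locality,
    // table skew, read request, write request, memstore size, store file.
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances  = {0.0,   0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0};

    double weightedSum = 0.0, multiplierSum = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weightedSum += multipliers[i] * imbalances[i];
      multiplierSum += multipliers[i];
    }
    double weightedAverage = weightedSum / multiplierSum;  // 0.0 for this run

    double threshold = 1.0;  // value of minCostNeedBalance reported in the log
    if (weightedAverage <= threshold) {
      System.out.println("skipping load balancing: "
          + weightedAverage + " <= " + threshold);
    }
  }
}
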
2024-11-13T18:38:00,652 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,652 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-13T18:38:00,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,652 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,652 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,652 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,653 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-13T18:38:00,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-13T18:38:00,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,653 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,653 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,653 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-13T18:38:00,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,654 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,654 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,654 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,654 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-13T18:38:00,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,654 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,654 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-13T18:38:00,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,655 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
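
[Editor's note] The "Slop is less than zero, not checking for sloppiness" line that precedes each decision refers to the balancer's slop threshold (hbase.regions.slop in stock HBase); a negative value disables the quick per-server region-count check. The sketch below shows what a slop-style check typically looks like, under the assumption that "sloppy" means a server's region count strays more than slop times the average from the average; the log only tells us the check is skipped when slop is negative.

// Hedged sketch of a slop-style check; the exact rule is assumed, not taken from the log.
public class SlopCheckSketch {
  static boolean isSloppy(int[] regionsPerServer, float slop) {
    if (slop < 0) {
      return false;  // matches the log: slop < 0 means no sloppiness check
    }
    double total = 0;
    for (int r : regionsPerServer) total += r;
    double avg = total / regionsPerServer.length;
    double floor = Math.floor(avg * (1 - slop));
    double ceiling = Math.ceil(avg * (1 + slop));
    for (int r : regionsPerServer) {
      if (r < floor || r > ceiling) return true;  // some server is outside the band
    }
    return false;
  }

  public static void main(String[] args) {
    System.out.println(isSloppy(new int[] {10, 10, 10, 18}, -1f));   // false: check disabled
    System.out.println(isSloppy(new int[] {10, 10, 10, 18}, 0.2f));  // true: 18 > ceil(12 * 1.2) = 15
  }
}
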
2024-11-13T18:38:00,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-13T18:38:00,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,656 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,656 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,656 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,656 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-13T18:38:00,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-13T18:38:00,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,657 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,657 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,657 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-13T18:38:00,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,657 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,657 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,657 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,657 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-13T18:38:00,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,658 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,658 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,658 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-13T18:38:00,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,659 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,659 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,659 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-13T18:38:00,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,659 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,659 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,659 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,660 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-13T18:38:00,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-13T18:38:00,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,661 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,662 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,662 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-13T18:38:00,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,662 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,662 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-13T18:38:00,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,663 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,663 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-13T18:38:00,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,663 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,664 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,664 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,664 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:38:00,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,664 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,664 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,664 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:38:00,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,665 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,665 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:38:00,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,666 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,666 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,666 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:38:00,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,666 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,666 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,666 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:38:00,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,667 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,667 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,667 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,667 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:38:00,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,668 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,668 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,668 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-13T18:38:00,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,668 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,669 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,669 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,669 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-13T18:38:00,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
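
The BalancerClusterState(202)/(303)/(314) lines above report how the test's ten servers are indexed: each server name gets a dense host index, and because the topology has a single rack named "rack", every server lands on rack 0. The following is a minimal, self-contained Java sketch of that indexing, written only to make the logged numbers easy to reproduce; the class and method names are hypothetical and are not the HBase API. The server array is ordered to match the indices shown in the "Hosts are {...}" map.

    // Illustrative sketch only: reproduces the host/rack indexing reported by the
    // BalancerClusterState log lines above. Hypothetical class, not HBase code.
    import java.util.LinkedHashMap;
    import java.util.Map;

    public class ClusterIndexSketch {
        public static void main(String[] args) {
            // Ordered so that position matches the logged host index (srv1111984082=0, ...).
            String[] servers = {
                "srv1111984082", "srv1481591117", "srv1550012279", "srv182082840",
                "srv192191476", "srv2069740296", "srv238012638", "srv382557700",
                "srv71778302", "srv947235847"
            };
            Map<String, Integer> hostIndex = new LinkedHashMap<>();
            Map<String, Integer> rackIndex = new LinkedHashMap<>();
            rackIndex.put("rack", 0);  // single-rack topology, as in the log
            for (String s : servers) {
                hostIndex.putIfAbsent(s, hostIndex.size());  // one host per server
            }
            for (String s : servers) {
                int idx = hostIndex.get(s);
                System.out.println("server " + idx + " is on host " + idx
                    + " and on rack " + rackIndex.get("rack"));
            }
            System.out.println("Number of tables=1, number of hosts=" + hostIndex.size()
                + ", number of racks=" + rackIndex.size());
        }
    }
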
2024-11-13T18:38:00,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,669 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,669 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,669 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-13T18:38:00,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,670 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
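
The recurring BaseLoadBalancer(253) line, "Slop is less than zero, not checking for sloppiness.", indicates the sloppiness check is skipped whenever the configured slop is negative. Below is a minimal sketch of that guard, assuming the slop value comes from a configuration key such as "hbase.regions.slop"; the log only shows the resulting message, so treat the key and the negative value as illustrative, not as the balancer's actual implementation.

    // Minimal sketch of the slop guard behind the BaseLoadBalancer(253) message.
    public class SlopGuardSketch {
        static boolean shouldCheckSloppiness(float slop) {
            if (slop < 0) {
                // Mirrors the logged message: a negative slop disables the check entirely.
                System.out.println("Slop is less than zero, not checking for sloppiness.");
                return false;
            }
            return true;
        }

        public static void main(String[] args) {
            // The test above apparently runs with a negative slop, so the check is skipped.
            shouldCheckSloppiness(-1.0f);  // assumed value for illustration only
        }
    }
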
2024-11-13T18:38:00,670 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,670 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-13T18:38:00,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,671 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,671 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,671 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-13T18:38:00,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,671 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,671 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-13T18:38:00,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,672 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,672 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,672 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,672 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-13T18:38:00,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
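
Each StochasticLoadBalancer(421) line above reports the same decision: the weighted average imbalance across the listed cost functions is 0.0, which is at or below the threshold of 1.0, so balancing is skipped for that table. The sketch below redoes that arithmetic with the multipliers and imbalances exactly as logged (500, 7, 15, 35, and four 5s, all with imbalance 0.0); it is an illustration of what the message reports, not the balancer's actual code path.

    // Sketch of the skip decision reported by StochasticLoadBalancer(421) above.
    public class NeedsBalanceSketch {
        public static void main(String[] args) {
            // Multipliers/imbalances copied from the logged functionCost list.
            double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
            double[] imbalances  = {  0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0, 0.0};
            double minCostNeedBalance = 1.0;  // threshold(1.0) from the log line

            double weighted = 0.0;
            double totalWeight = 0.0;
            for (int i = 0; i < multipliers.length; i++) {
                weighted += multipliers[i] * imbalances[i];
                totalWeight += multipliers[i];
            }
            double weightedAverageImbalance =
                totalWeight == 0 ? 0.0 : weighted / totalWeight;

            if (weightedAverageImbalance <= minCostNeedBalance) {
                System.out.println("skipping load balancing because weighted average imbalance="
                    + weightedAverageImbalance + " <= threshold(" + minCostNeedBalance + ")");
            }
        }
    }
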
2024-11-13T18:38:00,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,673 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,673 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,673 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-13T18:38:00,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,673 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,673 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,674 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-13T18:38:00,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,674 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,674 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,674 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,674 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-13T18:38:00,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,675 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,675 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-13T18:38:00,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,675 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,675 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,675 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-13T18:38:00,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,676 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,676 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,676 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-13T18:38:00,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
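
The skip message itself names the two knobs for more aggressive balancing: lower hbase.master.balancer.stochastic.minCostNeedBalance, or raise the multiplier of a specific cost function. Here is a small sketch of setting both on a Hadoop Configuration, as an HBase test or client might. The minCostNeedBalance key appears verbatim in the log; the regionCountCost key is the property commonly associated with RegionCountSkewCostFunction's multiplier but is named here as an assumption, and the 0.05 and 1000 values are arbitrary examples, not recommendations.

    // Sketch of the tuning the log message suggests; values are examples only.
    import org.apache.hadoop.conf.Configuration;

    public class BalancerTuningSketch {
        public static void main(String[] args) {
            Configuration conf = new Configuration();
            // Lower the threshold so smaller imbalances still trigger a balancing run.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
            // Or weight region-count skew more heavily relative to the other cost functions.
            // Key name assumed from the RegionCountSkewCostFunction multiplier (500.0 above).
            conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
            System.out.println(
                conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
        }
    }
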
2024-11-13T18:38:00,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-13T18:38:00,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,677 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-13T18:38:00,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,678 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,678 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-13T18:38:00,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,679 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,679 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,679 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,679 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-13T18:38:00,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,680 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,680 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,680 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-13T18:38:00,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,680 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,680 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,680 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-13T18:38:00,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,681 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,681 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,681 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,681 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-13T18:38:00,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,682 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,682 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,682 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-13T18:38:00,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,682 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,682 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,682 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,682 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-13T18:38:00,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,683 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,683 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,683 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-13T18:38:00,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,684 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,684 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,684 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-13T18:38:00,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,684 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,684 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,684 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,684 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-13T18:38:00,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,685 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,685 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,685 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-13T18:38:00,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,685 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,686 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-13T18:38:00,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,686 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-13T18:38:00,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,687 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,687 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-13T18:38:00,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,688 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,688 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,688 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,688 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-13T18:38:00,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,689 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-13T18:38:00,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,689 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,689 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-13T18:38:00,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
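Editor's note: the skip decisions above reduce to one comparison, the weighted average of the per-cost-function imbalances against the threshold (1.0 here). Below is a minimal sketch of that arithmetic using the multipliers and imbalances printed in these messages; the class and variable names are hypothetical and the exact weighting inside StochasticLoadBalancer may differ, so treat it as illustrative only.

    // Illustrative only: recomputes the "weighted average imbalance" reported above
    // from the printed multipliers and imbalances. Names here are hypothetical.
    public class WeightedImbalanceSketch {
      public static void main(String[] args) {
        // Multipliers from the log; functions marked "(not needed)" are skipped.
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {  0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0, 0.0};
        double minCostNeedBalance = 1.0; // threshold printed in the message

        double weightedSum = 0.0, weightTotal = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
          weightedSum += multipliers[i] * imbalances[i];
          weightTotal += multipliers[i];
        }
        double weightedAvg = weightTotal == 0 ? 0 : weightedSum / weightTotal;

        // With every imbalance at 0.0 the average is 0.0 <= 1.0, so the table is skipped.
        System.out.printf("weighted average imbalance=%.1f, threshold=%.1f, needsBalance=%b%n",
            weightedAvg, minCostNeedBalance, weightedAvg > minCostNeedBalance);
      }
    }
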
2024-11-13T18:38:00,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-13T18:38:00,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
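Editor's note: the message itself names the two tuning levers, lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising individual cost-function multipliers. A short sketch of both follows; the minCostNeedBalance key is taken verbatim from the message, while the multiplier key used here (hbase.master.balancer.stochastic.regionCountCost) is the commonly documented name and should be verified against the HBase version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch of the tuning suggested by the log message above; assumed property names
    // other than minCostNeedBalance should be checked against the running version.
    public class BalancerTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Option 1: lower the skip threshold (1.0 in this run).
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Option 2: weight a specific cost function more heavily, e.g. region count skew.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
      }
    }
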
2024-11-13T18:38:00,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-13T18:38:00,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,691 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,691 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-13T18:38:00,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,692 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,692 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-13T18:38:00,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,692 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,692 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,693 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-13T18:38:00,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
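Editor's note: the "Hosts are {...} racks are {rack=0}" map and the "server N is on host N / rack 0" lines describe how the cluster topology is indexed before costing. The sketch below mirrors that mapping with the exact server names and indices from this log; the field and class names are hypothetical, not the real BalancerClusterState internals. With ten distinct hosts and a single rack, rack locality cannot differentiate placements, which is consistent with RackLocalityCostFunction reporting imbalance=0.0.

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Hypothetical mirror of the topology indexing the log describes: each server gets a
    // host index, and every host sits in the single rack (index 0).
    public class TopologyIndexSketch {
      public static void main(String[] args) {
        // Ordered so that positions match the indices printed in the log's host map.
        String[] servers = {"srv1111984082", "srv1481591117", "srv1550012279", "srv182082840",
            "srv192191476", "srv2069740296", "srv238012638", "srv382557700", "srv71778302",
            "srv947235847"};
        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        for (int i = 0; i < servers.length; i++) {
          int host = hostIndex.computeIfAbsent(servers[i], h -> hostIndex.size());
          int rack = 0; // single rack "rack" => rack index 0 for every server
          System.out.printf("server %d is on host %d, rack %d%n", i, host, rack);
        }
        System.out.printf("hosts=%d, racks=%d%n", hostIndex.size(), 1);
      }
    }
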
2024-11-13T18:38:00,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,693 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,693 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-13T18:38:00,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,694 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
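Editor's note: "Slop is less than zero, not checking for sloppiness" refers to the region-count slop check; in this test the slop is configured negative, which bypasses the check entirely. Below is a conceptual version of such a check. The usual knob is hbase.regions.slop, but the exact logic in BaseLoadBalancer may differ, so this only illustrates why a negative slop short-circuits it.

    // Conceptual sloppiness check: with slop < 0 the band test is skipped, matching the
    // "Slop is less than zero, not checking for sloppiness." lines above.
    public class SlopCheckSketch {
      static boolean isSloppy(int[] regionCountsPerServer, float slop) {
        if (slop < 0) {
          return false; // check disabled
        }
        int total = 0;
        for (int c : regionCountsPerServer) total += c;
        float avg = (float) total / regionCountsPerServer.length;
        int floor = (int) Math.floor(avg * (1 - slop));
        int ceiling = (int) Math.ceil(avg * (1 + slop));
        for (int c : regionCountsPerServer) {
          if (c > ceiling || c < floor) return true; // a server is outside the slop band
        }
        return false;
      }

      public static void main(String[] args) {
        System.out.println(isSloppy(new int[] {5, 5, 5, 5}, -1.0f)); // false: check skipped
        System.out.println(isSloppy(new int[] {9, 1, 5, 5}, 0.2f));  // true: 9 > ceil(5 * 1.2)
      }
    }
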
2024-11-13T18:38:00,694 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,694 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-13T18:38:00,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,694 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,694 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,694 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-13T18:38:00,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,695 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-13T18:38:00,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,696 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,696 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,696 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,696 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-13T18:38:00,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-13T18:38:00,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
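Editor's note: when auditing a long run like this one, it can help to pull the per-table functionCost breakdown out of the text. The small parser below is written against the exact wording of the messages above (table name plus "Name : (multiplier=..., imbalance=...)" pairs); other HBase versions may format the message differently, so the regexes are assumptions tied to this log.

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Extracts the table name and each cost-function multiplier/imbalance pair from a
    // StochasticLoadBalancer skip message formatted like the ones in this log.
    public class FunctionCostParser {
      private static final Pattern TABLE = Pattern.compile("Table specific \\((\\S+)\\)");
      private static final Pattern COST =
          Pattern.compile("(\\w+CostFunction) : \\(multiplier=([0-9.]+), imbalance=([0-9.]+)\\)");

      public static void main(String[] args) {
        String line = "Table specific (table45) - skipping load balancing because weighted average "
            + "imbalance=0.0 <= threshold(1.0). functionCost=RegionCountSkewCostFunction : "
            + "(multiplier=500.0, imbalance=0.0); MoveCostFunction : (multiplier=7.0, imbalance=0.0);";

        Matcher t = TABLE.matcher(line);
        if (t.find()) System.out.println("table=" + t.group(1));
        Matcher c = COST.matcher(line);
        while (c.find()) {
          System.out.printf("%s multiplier=%s imbalance=%s%n", c.group(1), c.group(2), c.group(3));
        }
      }
    }
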
2024-11-13T18:38:00,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-13T18:38:00,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,698 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,698 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-13T18:38:00,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,699 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-13T18:38:00,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,699 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,699 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-13T18:38:00,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
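The BalancerClusterState entries above assign each region server an index into host and rack slots ("Hosts are {srvNNN=idx, ...}", "server i is on host i", "server i is on rack 0"). The following is a minimal illustrative sketch, not the HBase source, of how such an indexing could be built; the class and variable names are hypothetical, while the server names and index order are taken from the log lines above.

import java.util.*;

// Illustrative sketch only: index server names into host/rack slots the way the
// BalancerClusterState lines report them. Hypothetical class/field names.
public class ClusterIndexSketch {
    public static void main(String[] args) {
        // Server names listed in the index order shown in the log ("srv1111984082=0", ...).
        List<String> servers = Arrays.asList(
            "srv1111984082", "srv1481591117", "srv1550012279", "srv182082840",
            "srv192191476", "srv2069740296", "srv238012638", "srv382557700",
            "srv71778302", "srv947235847");

        Map<String, Integer> hostIndex = new HashMap<>();
        Map<String, Integer> rackIndex = new HashMap<>();
        int[] serverToHost = new int[servers.size()];
        int[] serverToRack = new int[servers.size()];

        for (int i = 0; i < servers.size(); i++) {
            // One server per host in this test, so each server gets its own host slot.
            serverToHost[i] = hostIndex.computeIfAbsent(servers.get(i), s -> hostIndex.size());
            // A single rack ("rack"), so every server maps to rack 0.
            serverToRack[i] = rackIndex.computeIfAbsent("rack", r -> rackIndex.size());
            System.out.println("server " + i + " is on host " + serverToHost[i]
                + " and rack " + serverToRack[i]);
        }
        System.out.println("Number of hosts=" + hostIndex.size()
            + ", number of racks=" + rackIndex.size());
    }
}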
2024-11-13T18:38:00,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,700 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,700 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,700 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-13T18:38:00,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,701 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
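Each skip decision above compares a "weighted average imbalance" against the minCostNeedBalance threshold (1.0) using the (multiplier, imbalance) pairs in the functionCost listing. As a rough illustration, assuming a simple multiplier-weighted mean of the per-cost-function imbalances (the real StochasticLoadBalancer cost computation is more involved), the comparison could look like this:

// Hedged sketch: a multiplier-weighted average of per-cost-function imbalances,
// one plausible reading of the "weighted average imbalance" reported above.
// This is an illustration, not the HBase implementation.
public class WeightedImbalanceSketch {
    static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
        double weightedSum = 0.0;
        double totalWeight = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            weightedSum += multipliers[i] * imbalances[i];
            totalWeight += multipliers[i];
        }
        return totalWeight == 0.0 ? 0.0 : weightedSum / totalWeight;
    }

    public static void main(String[] args) {
        // Multipliers as logged: RegionCountSkew=500, Move=7, RackLocality=15,
        // TableSkew=35, ReadRequest=5, WriteRequest=5, MemStoreSize=5, StoreFile=5.
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
        double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance

        double imbalance = weightedAverageImbalance(multipliers, imbalances);
        if (imbalance <= minCostNeedBalance) {
            System.out.println("skipping load balancing: imbalance=" + imbalance
                + " <= threshold(" + minCostNeedBalance + ")");
        } else {
            System.out.println("would generate a balance plan: imbalance=" + imbalance);
        }
    }
}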
2024-11-13T18:38:00,701 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,701 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-13T18:38:00,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,701 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,701 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,701 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,701 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-13T18:38:00,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-13T18:38:00,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,702 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,702 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,702 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-13T18:38:00,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,702 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,703 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,703 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,703 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-13T18:38:00,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,703 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,703 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,703 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-13T18:38:00,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,704 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,704 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,704 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-13T18:38:00,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,705 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,705 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,705 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,705 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-13T18:38:00,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-13T18:38:00,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,706 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,706 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,706 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-13T18:38:00,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,706 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,706 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,706 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-13T18:38:00,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,707 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-13T18:38:00,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,708 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
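The skip message advises either lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising individual cost-function multipliers. Below is a minimal sketch of setting such values programmatically on a Hadoop Configuration; only the minCostNeedBalance key appears verbatim in the log, and the regionCountCost key is an assumed property name for the RegionCountSkewCostFunction multiplier that should be verified against the HBase documentation for your version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hedged sketch of tuning the balancer knobs mentioned in the skip message above.
public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Lower the skip threshold so smaller imbalances still trigger balancing.
        // (This key is quoted directly in the log message.)
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Or weight region-count skew more heavily relative to the other costs.
        // (Assumed property name for the RegionCountSkewCostFunction multiplier.)
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

        System.out.println("minCostNeedBalance="
            + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
    }
}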
2024-11-13T18:38:00,708 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,708 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-13T18:38:00,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,708 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,708 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,709 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,709 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-13T18:38:00,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-13T18:38:00,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,709 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,709 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,709 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-13T18:38:00,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,710 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,710 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,710 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-13T18:38:00,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,711 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,711 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-13T18:38:00,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,711 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,711 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-13T18:38:00,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,712 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-13T18:38:00,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,713 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,713 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,713 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-13T18:38:00,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,713 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,714 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-13T18:38:00,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,714 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-13T18:38:00,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,715 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,715 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-13T18:38:00,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,715 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,716 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,716 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,716 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-13T18:38:00,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,716 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,716 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,716 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-13T18:38:00,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,717 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-13T18:38:00,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-13T18:38:00,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238012638=6, srv947235847=9, srv382557700=7, srv192191476=4, srv71778302=8, srv1481591117=1, srv182082840=3, srv1111984082=0, srv2069740296=5, srv1550012279=2} racks are {rack=0} 2024-11-13T18:38:00,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,722 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-13T18:38:00,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,723 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,723 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-13T18:38:00,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,723 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-13T18:38:00,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,724 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,724 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,724 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-13T18:38:00,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,725 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-13T18:38:00,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
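The "skipping load balancing" entries repeated above all turn on a single comparison: the weighted average imbalance, 0.0 in every case here, against the threshold hbase.master.balancer.stochastic.minCostNeedBalance, reported as 1.0 for this run. As a hedged sketch only, the snippet below shows how that threshold might be lowered programmatically. The property name is quoted from the log message itself; the example value (0.05f) and the use of HBaseConfiguration are illustrative assumptions rather than the test's actual setup, and in a real cluster the property would normally be set in hbase-site.xml on the master.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerThresholdSketch {
  public static void main(String[] args) {
    // Hypothetical tuning sketch: lower the threshold the log message refers to so the
    // stochastic balancer will act on smaller imbalances. The value 0.05f is only an example.
    Configuration conf = HBaseConfiguration.create();
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    System.out.println("minCostNeedBalance = "
        + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}
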
2024-11-13T18:38:00,725 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-13T18:38:00,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,726 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-13T18:38:00,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-13T18:38:00,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,727 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-13T18:38:00,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,728 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,728 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,728 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-13T18:38:00,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
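Each skip message also lists every cost function with its multiplier and current imbalance. One plausible reading, offered as an assumption rather than as the actual StochasticLoadBalancer implementation, is that the reported weighted average behaves like a multiplier-weighted mean of the per-function imbalances; with every imbalance at 0.0, as throughout this log, any such weighting yields 0.0 and stays below the 1.0 threshold, so no balance plan is generated. A minimal sketch of that comparison, using the multipliers printed above:

public class WeightedImbalanceSketch {
  // Assumed weighting scheme: multiplier-weighted mean of per-function imbalances.
  static double weightedAverage(double[] multipliers, double[] imbalances) {
    double weighted = 0.0;
    double total = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weighted += multipliers[i] * imbalances[i];
      total += multipliers[i];
    }
    return total == 0.0 ? 0.0 : weighted / total;
  }

  public static void main(String[] args) {
    // Multipliers as printed in the log; cost functions marked "(not needed)" are omitted.
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances  = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
    double avg = weightedAverage(multipliers, imbalances);
    double minCostNeedBalance = 1.0; // threshold reported in the log message
    System.out.println(avg <= minCostNeedBalance
        ? "skipping load balancing (imbalance " + avg + " <= " + minCostNeedBalance + ")"
        : "generating a balance plan");
  }
}
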
2024-11-13T18:38:00,729 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,729 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-13T18:38:00,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,729 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,729 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,729 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-13T18:38:00,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,730 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,730 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,730 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-13T18:38:00,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,731 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,731 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,731 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,731 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-13T18:38:00,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,731 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,731 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,731 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-13T18:38:00,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,732 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
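Before each per-table decision, the BalancerClusterState lines index the ten servers onto ten distinct hosts and a single rack, which is why the "server N is on host N" and "server N is on rack 0" sequences repeat verbatim for every table. The following hypothetical sketch reproduces those lines from the host map printed in the log; it is an illustration only, not the HBase BalancerClusterState code.

import java.util.LinkedHashMap;
import java.util.Map;

public class ClusterTopologySketch {
  public static void main(String[] args) {
    // Server names in index order, taken from the "Hosts are {...}" entry above
    // (srv1108834023=0, srv15029278=1, ..., srv850870472=9).
    String[] servers = {"srv1108834023", "srv15029278", "srv1513726762", "srv1548218407",
        "srv327702511", "srv359018906", "srv40610417", "srv515313338", "srv563052408",
        "srv850870472"};
    Map<String, Integer> hostIndex = new LinkedHashMap<>();
    for (int i = 0; i < servers.length; i++) {
      hostIndex.put(servers[i], i);
      System.out.println("server " + i + " is on host " + i);  // each server is its own host
      System.out.println("server " + i + " is on rack 0");     // only one rack in this test
    }
    System.out.println("Number of tables=1, number of hosts=" + hostIndex.size()
        + ", number of racks=1");
  }
}
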
2024-11-13T18:38:00,732 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,732 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-13T18:38:00,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,733 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,733 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,733 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,733 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-13T18:38:00,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,733 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,733 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,734 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-13T18:38:00,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,734 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,734 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,734 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,734 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-13T18:38:00,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,735 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,735 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-13T18:38:00,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,735 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,735 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-13T18:38:00,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,736 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,736 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-13T18:38:00,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,737 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,737 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,737 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-13T18:38:00,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,737 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,738 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,738 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,738 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-13T18:38:00,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,738 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,738 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,738 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-13T18:38:00,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,739 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,739 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,739 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-13T18:38:00,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,739 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,739 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,739 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,740 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-13T18:38:00,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,740 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-13T18:38:00,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,741 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,741 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,741 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-13T18:38:00,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,741 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,741 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-13T18:38:00,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,742 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,742 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-13T18:38:00,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,743 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,743 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-13T18:38:00,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,743 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-13T18:38:00,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,744 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-13T18:38:00,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,746 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,746 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,746 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-13T18:38:00,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,746 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,746 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,746 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-13T18:38:00,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,747 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-13T18:38:00,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-13T18:38:00,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,748 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-13T18:38:00,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,749 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,749 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,749 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-13T18:38:00,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-13T18:38:00,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,750 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-13T18:38:00,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-13T18:38:00,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,751 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-13T18:38:00,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,753 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,753 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,753 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-13T18:38:00,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,753 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,753 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,753 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-13T18:38:00,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,754 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,754 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,754 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,754 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-13T18:38:00,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,755 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,755 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,755 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-13T18:38:00,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,755 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,755 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,755 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,755 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-13T18:38:00,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,756 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,756 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,756 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-13T18:38:00,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,757 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
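Editor's note: the repeated "skipping load balancing" entries above name two tuning knobs. Below is a minimal, hedged sketch of setting them programmatically; the minCostNeedBalance key is taken verbatim from the log message, while the regionCountCost key is an assumption inferred from the RegionCountSkewCostFunction multiplier (500.0) and should be verified against the HBase version under test.

// Hedged sketch: making the StochasticLoadBalancer balance more aggressively,
// following the advice printed in the log entries above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    // Lower the threshold below which balancing is skipped (1.0 in the log above).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    // Or raise the relative weight of a specific cost function.
    // NOTE: this key is an assumption for RegionCountSkewCostFunction (default multiplier 500.0).
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
    return conf;
  }
}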
2024-11-13T18:38:00,757 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,757 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-13T18:38:00,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,757 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,758 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,758 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,758 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table141 2024-11-13T18:38:00,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,758 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,758 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,758 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table142 2024-11-13T18:38:00,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,759 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,759 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,759 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,759 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table143 2024-11-13T18:38:00,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,760 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,760 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,760 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table144 2024-11-13T18:38:00,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
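Editor's note: each skip decision lists per-function multipliers and imbalances. The self-contained sketch below reproduces the arithmetic those numbers imply; the formula (sum of multiplier times imbalance divided by the sum of multipliers over the "needed" functions) is an assumption about how the weighted average is aggregated, not code copied from HBase.

// Hedged arithmetic sketch: weighted average imbalance from the functionCost list above,
// assuming weightedAverage = sum(multiplier_i * imbalance_i) / sum(multiplier_i).
// With every imbalance at 0.0 the result is 0.0 <= threshold(1.0), so the plan is skipped.
public class WeightedImbalanceSketch {
  public static void main(String[] args) {
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0}; // values from the log
    double[] imbalances  = {0.0,   0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0};
    double weighted = 0.0, total = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weighted += multipliers[i] * imbalances[i];
      total += multipliers[i];
    }
    double weightedAverageImbalance = total == 0.0 ? 0.0 : weighted / total;
    double threshold = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance
    System.out.println("weighted average imbalance = " + weightedAverageImbalance
        + ", balancing needed = " + (weightedAverageImbalance > threshold));
  }
}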
2024-11-13T18:38:00,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table145 2024-11-13T18:38:00,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,761 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table146 2024-11-13T18:38:00,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table147 2024-11-13T18:38:00,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,763 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table148 2024-11-13T18:38:00,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,764 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,764 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table149 2024-11-13T18:38:00,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,764 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
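Editor's note: the recurring "Slop is less than zero, not checking for sloppiness." line indicates the sloppiness shortcut is disabled for this balancer. The sketch below is a hedged reconstruction of that guard; the "hbase.regions.slop" key, the negative default, and the bounds formula are assumptions paraphrased from the log, not code taken from BaseLoadBalancer.

// Hedged sketch of the sloppiness guard suggested by the log line above.
// Assumes slop is read from "hbase.regions.slop" and that a negative value
// (assumed default for the stochastic balancer) disables the per-server
// region-count bounds check entirely.
import org.apache.hadoop.conf.Configuration;

public class SlopCheckSketch {
  static boolean slopCheckTriggersBalance(Configuration conf,
                                          double averageRegionsPerServer,
                                          int maxRegionsOnAnyServer) {
    float slop = conf.getFloat("hbase.regions.slop", -1f); // assumed key and default
    if (slop < 0) {
      // Mirrors the log: slop < 0 means the sloppiness check is skipped.
      return false;
    }
    double ceiling = Math.ceil(averageRegionsPerServer * (1 + slop));
    return maxRegionsOnAnyServer > ceiling;
  }
}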
2024-11-13T18:38:00,764 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:38:00,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,765 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,765 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:38:00,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,766 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,766 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:38:00,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,766 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,766 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,766 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:38:00,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:38:00,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:38:00,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,768 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,768 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,768 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,768 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-13T18:38:00,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,769 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,769 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,769 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-13T18:38:00,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,770 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,770 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,770 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,770 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-13T18:38:00,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,770 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-13T18:38:00,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,771 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-13T18:38:00,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,772 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,772 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,772 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,772 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-13T18:38:00,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,772 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,773 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-13T18:38:00,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,773 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,773 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,773 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-13T18:38:00,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-13T18:38:00,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-13T18:38:00,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,775 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-13T18:38:00,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,776 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table150 2024-11-13T18:38:00,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,776 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,777 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-13T18:38:00,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
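[editor's note] The BalancerClusterState lines above map each server index to a host index and a rack index (every host sits on the single rack 0 in this run). As a rough illustration of that bookkeeping only, and not the actual org.apache.hadoop.hbase.master.balancer implementation, the sketch below builds the same kind of index arrays from a host map shaped like the logged "Hosts are {srv...=..., ...} racks are {rack=0}"; the class and the per-host loop are made up for the example.

import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative only: mimics the server->host and server->rack indexing that
// BalancerClusterState logs; not the real HBase balancer code.
public class ClusterIndexSketch {
    public static void main(String[] args) {
        // Shaped like the logged map "Hosts are {srv1108834023=0, srv15029278=1, ...}"
        Map<String, Integer> hostToIndex = new LinkedHashMap<>();
        hostToIndex.put("srv1108834023", 0);
        hostToIndex.put("srv15029278", 1);
        hostToIndex.put("srv1513726762", 2);

        int numServers = hostToIndex.size();
        int[] serverToHost = new int[numServers];
        int[] serverToRack = new int[numServers];

        int server = 0;
        for (Map.Entry<String, Integer> e : hostToIndex.entrySet()) {
            serverToHost[server] = e.getValue(); // one RegionServer per host in this test
            serverToRack[server] = 0;            // single rack, as in "racks are {rack=0}"
            System.out.println("server " + server + " is on host " + serverToHost[server]
                + ", rack " + serverToRack[server]);
            server++;
        }
    }
}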
2024-11-13T18:38:00,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,777 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-13T18:38:00,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
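[editor's note] Each "skipping load balancing" entry compares a weighted average of the per-cost-function imbalances against hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run). Below is a minimal sketch of that decision, assuming the weighted average is simply sum(multiplier * imbalance) / sum(multiplier) over the functions marked as needed; the real StochasticLoadBalancer internals may weight things differently, so treat the helper names and arithmetic as assumptions.

// Hedged sketch of the skip decision logged by StochasticLoadBalancer(421);
// the exact weighting is an assumption, not copied from the HBase source.
public class NeedsBalanceSketch {
    static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
        double weighted = 0.0, totalWeight = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            weighted += multipliers[i] * imbalances[i];
            totalWeight += multipliers[i];
        }
        return totalWeight == 0.0 ? 0.0 : weighted / totalWeight;
    }

    public static void main(String[] args) {
        // Multipliers from the log: RegionCountSkew=500, Move=7, RackLocality=15,
        // TableSkew=35, Read/Write/MemStore/StoreFile=5 each; all imbalances are 0.0 here.
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {0.0,   0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0};
        double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance

        double avg = weightedAverageImbalance(multipliers, imbalances);
        if (avg <= minCostNeedBalance) {
            System.out.println("skipping load balancing: imbalance=" + avg
                + " <= threshold(" + minCostNeedBalance + ")");
        } else {
            System.out.println("balancing needed: imbalance=" + avg);
        }
    }
}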
2024-11-13T18:38:00,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,778 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table151 2024-11-13T18:38:00,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,779 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,779 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,779 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-13T18:38:00,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,779 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,779 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table130 2024-11-13T18:38:00,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,780 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-13T18:38:00,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table131 2024-11-13T18:38:00,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
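[editor's note] The log's own suggestion for more aggressive balancing is to lower hbase.master.balancer.stochastic.minCostNeedBalance or raise a cost function's multiplier. A minimal sketch of doing that programmatically follows; only the minCostNeedBalance key appears verbatim in the log, so the multiplier property name used here is an assumption, and the chosen values are purely illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: tuning the stochastic balancer configuration before starting a master/minicluster.
public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Lower the skip threshold mentioned in the log (1.0 in this test run).
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Assumed property name for the RegionCountSkewCostFunction multiplier (500.0 in the log).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}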
2024-11-13T18:38:00,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,782 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table132 2024-11-13T18:38:00,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,782 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,782 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table133 2024-11-13T18:38:00,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,783 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table134 2024-11-13T18:38:00,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,783 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,783 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,784 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table135 2024-11-13T18:38:00,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
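[editor's note] Because every table in this run is skipped with the same message, a quick way to confirm coverage is to scan the captured log for the per-table skip entries. A small, self-contained sketch follows; the file name is hypothetical and the regex simply matches the message format shown above.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Counts "Table specific (tableN) - skipping load balancing" entries in a captured log file.
public class SkipScanSketch {
    public static void main(String[] args) throws IOException {
        String log = Files.readString(Paths.get("balancer-test.log")); // hypothetical path
        Pattern p = Pattern.compile("Table specific \\((table\\d+)\\) - skipping load balancing");
        Matcher m = p.matcher(log);
        int skipped = 0;
        while (m.find()) {
            System.out.println("skipped: " + m.group(1));
            skipped++;
        }
        System.out.println("total skipped tables: " + skipped);
    }
}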
2024-11-13T18:38:00,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,784 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,784 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table136 2024-11-13T18:38:00,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,785 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
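[editor's note] The recurring "Slop is less than zero, not checking for sloppiness" line refers to the balancer's slop bound: with a non-negative slop, a server counts as over- or under-loaded when its region count falls outside average*(1±slop), while a negative slop disables that pre-check entirely. The helper below is a hedged sketch of that rule, not BaseLoadBalancer's actual code.

// Illustrative slop check: with slop < 0 the sloppiness test is skipped entirely,
// mirroring "Slop is less than zero, not checking for sloppiness."
public class SlopCheckSketch {
    static boolean isSloppy(int[] regionsPerServer, float slop) {
        if (slop < 0) {
            return false; // check disabled, as in this test run
        }
        double avg = 0;
        for (int r : regionsPerServer) {
            avg += r;
        }
        avg /= regionsPerServer.length;
        int floor = (int) Math.floor(avg * (1 - slop));
        int ceiling = (int) Math.ceil(avg * (1 + slop));
        for (int r : regionsPerServer) {
            if (r < floor || r > ceiling) {
                return true; // at least one server sits outside the slop bounds
            }
        }
        return false;
    }

    public static void main(String[] args) {
        System.out.println(isSloppy(new int[] {10, 10, 30}, 0.2f)); // true: 30 exceeds the ceiling
        System.out.println(isSloppy(new int[] {10, 10, 30}, -1f));  // false: check skipped
    }
}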
2024-11-13T18:38:00,785 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,785 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table137 2024-11-13T18:38:00,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,786 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,786 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,786 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,786 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table138 2024-11-13T18:38:00,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,786 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,786 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,786 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table139 2024-11-13T18:38:00,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,787 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,787 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,787 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,787 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-13T18:38:00,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,787 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,787 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-13T18:38:00,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,789 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
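Editor's note: the skip decisions in this run all report a weighted average imbalance of 0.0 against the minCostNeedBalance threshold of 1.0. Below is a minimal sketch of that comparison, assuming the weighted average is simply the multiplier-weighted mean of the per-function imbalances printed in the functionCost list; the exact aggregation inside StochasticLoadBalancer may differ, and the class below is illustrative only, not HBase code.

// Sketch only: recomputes the "weighted average imbalance" the way the log
// message suggests, from the (multiplier, imbalance) pairs it prints.
// Values are copied from the functionCost line above; the aggregation
// formula is an assumption, not the verbatim HBase implementation.
public final class BalanceSkipCheck {
    public static void main(String[] args) {
        double[][] functions = {
            {500.0, 0.0}, // RegionCountSkewCostFunction
            {7.0,   0.0}, // MoveCostFunction
            {15.0,  0.0}, // RackLocalityCostFunction
            {35.0,  0.0}, // TableSkewCostFunction
            {5.0,   0.0}, // ReadRequestCostFunction
            {5.0,   0.0}, // WriteRequestCostFunction
            {5.0,   0.0}, // MemStoreSizeCostFunction
            {5.0,   0.0}  // StoreFileCostFunction
        };
        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (double[] f : functions) {
            weightedSum += f[0] * f[1];
            multiplierSum += f[0];
        }
        double weightedAverageImbalance = weightedSum / multiplierSum; // 0.0 for this run
        double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance
        System.out.printf("imbalance=%.1f threshold=%.1f -> %s%n",
            weightedAverageImbalance, minCostNeedBalance,
            weightedAverageImbalance <= minCostNeedBalance ? "skip balancing" : "generate plan");
    }
}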
2024-11-13T18:38:00,789 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,789 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-13T18:38:00,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,790 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,790 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,790 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,790 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-13T18:38:00,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,790 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,790 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,790 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-13T18:38:00,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,791 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,791 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,791 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,791 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-13T18:38:00,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-13T18:38:00,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-13T18:38:00,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,793 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table140 2024-11-13T18:38:00,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,794 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,794 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-13T18:38:00,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,794 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-13T18:38:00,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,795 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-13T18:38:00,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,796 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
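Editor's note: each skip message above suggests lowering hbase.master.balancer.stochastic.minCostNeedBalance (or raising individual cost-function multipliers) to force more aggressive balancing. A minimal sketch of such an override via the Hadoop/HBase Configuration API follows; in a real deployment this would typically live in hbase-site.xml on the master. Only the property quoted verbatim in the log is set, and the 0.05 value is purely illustrative, not a recommendation.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: lowers the balancer threshold named in the log messages above.
public final class BalancerTuning {
    public static Configuration aggressiveBalancerConf() {
        Configuration conf = HBaseConfiguration.create();
        // Property name taken verbatim from the StochasticLoadBalancer log line;
        // 0.05f is an arbitrary illustrative value.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        return conf;
    }
}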
2024-11-13T18:38:00,796 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,796 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-13T18:38:00,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,797 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,797 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,797 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,797 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-13T18:38:00,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,797 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,798 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,798 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-13T18:38:00,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,798 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,798 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,798 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,798 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-13T18:38:00,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
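The BalancerClusterState lines above enumerate how each server is given a host index and a rack index (here every server is its own host and all ten servers share rack 0). The following is a minimal, self-contained sketch of that bookkeeping using plain Java collections and the server names from the log; it is illustrative only and does not reproduce the real BalancerClusterState internals.

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative only: assigns each server an index, maps it to a host index,
// and puts every host on a single rack, mirroring the "server N is on host N"
// and "server N is on rack 0" lines in the log above.
public class ClusterIndexSketch {
    public static void main(String[] args) {
        String[] servers = {"srv1108834023", "srv15029278", "srv1513726762", "srv1548218407",
                "srv327702511", "srv359018906", "srv40610417", "srv515313338",
                "srv563052408", "srv850870472"};
        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        for (int server = 0; server < servers.length; server++) {
            // Each server runs on its own host, so host index == server index here.
            hostIndex.put(servers[server], server);
            int rack = 0; // a single rack, as in "racks are {rack=0}"
            System.out.println("server " + server + " is on host " + server
                    + " and rack " + rack);
        }
        System.out.println("Number of hosts=" + hostIndex.size() + ", number of racks=1");
    }
}
```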
2024-11-13T18:38:00,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-13T18:38:00,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-13T18:38:00,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,800 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,800 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,800 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,800 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-13T18:38:00,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,801 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,801 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,801 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-13T18:38:00,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,801 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table152 2024-11-13T18:38:00,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-13T18:38:00,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,803 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
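The "weighted average imbalance" in the skip messages above combines each active cost function's imbalance with its multiplier. A small sketch of that arithmetic follows, under the assumption that the reported figure is simply the multiplier-weighted mean of the per-function imbalances listed in the functionCost text (the "(not needed)" functions excluded); this is an illustration, not the exact StochasticLoadBalancer formula.

```java
// Illustrative arithmetic only; the multipliers and imbalances are copied from the
// functionCost=... text above, and the weighting scheme is an assumption.
public class WeightedImbalanceSketch {
    public static void main(String[] args) {
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {  0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0, 0.0};
        double weighted = 0.0, totalWeight = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            weighted += multipliers[i] * imbalances[i];
            totalWeight += multipliers[i];
        }
        double weightedAverage = weighted / totalWeight; // 0.0 for this cluster state
        double minCostNeedBalance = 1.0;                 // threshold(1.0) from the log
        System.out.println("weighted average imbalance=" + weightedAverage
                + " <= threshold(" + minCostNeedBalance + "): "
                + (weightedAverage <= minCostNeedBalance ? "skip balancing" : "balance"));
    }
}
```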
2024-11-13T18:38:00,803 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,803 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table153 2024-11-13T18:38:00,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,804 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-13T18:38:00,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table154 2024-11-13T18:38:00,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,805 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,805 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,805 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,805 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-13T18:38:00,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-13T18:38:00,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
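Each skip message names two knobs for more aggressive balancing: lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising the multiplier of a specific cost function. Below is a hedged sketch of setting those properties through the HBase client Configuration API; the regionCountCost key is assumed to be the property behind the RegionCountSkewCostFunction multiplier shown above, and in a real deployment these values would normally be set in hbase-site.xml on the master rather than in code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: shows where the two tuning knobs named in the log message live.
public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Lower the "needs balance" threshold from 1.0 so smaller imbalances still
        // produce a balance plan.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Or give one cost function more relative weight, e.g. region count skew
        // (property name assumed to back the multiplier=500.0 shown above).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}
```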
2024-11-13T18:38:00,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-13T18:38:00,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,807 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-13T18:38:00,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,808 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,808 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-13T18:38:00,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,808 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,809 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-13T18:38:00,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,809 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-13T18:38:00,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
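[Editor's note] The repeated "skipping load balancing" records above come down to one comparison: a weighted average of each cost function's imbalance (weighted by its multiplier) measured against the minCostNeedBalance threshold of 1.0. A minimal, illustrative Java sketch of that comparison follows, assuming the weighting is a plain multiplier-weighted mean; the class and method names are hypothetical and not HBase API.

```java
// Hypothetical sketch of the threshold check reported above; not HBase source.
public class ImbalanceCheckSketch {

    // Each row is {multiplier, imbalance}, mirroring the functionCost= entries.
    static double weightedAverageImbalance(double[][] costs) {
        double weighted = 0.0;
        double totalMultiplier = 0.0;
        for (double[] cost : costs) {
            weighted += cost[0] * cost[1];   // multiplier * imbalance
            totalMultiplier += cost[0];
        }
        return totalMultiplier == 0.0 ? 0.0 : weighted / totalMultiplier;
    }

    public static void main(String[] args) {
        double[][] costs = {
            {500.0, 0.0},  // RegionCountSkewCostFunction
            {7.0, 0.0},    // MoveCostFunction
            {15.0, 0.0},   // RackLocalityCostFunction
            {35.0, 0.0},   // TableSkewCostFunction
            {5.0, 0.0},    // ReadRequestCostFunction
            {5.0, 0.0},    // WriteRequestCostFunction
            {5.0, 0.0},    // MemStoreSizeCostFunction
            {5.0, 0.0},    // StoreFileCostFunction
        };
        double minCostNeedBalance = 1.0;  // hbase.master.balancer.stochastic.minCostNeedBalance
        double avg = weightedAverageImbalance(costs);
        System.out.println(avg <= minCostNeedBalance
            ? "weighted average imbalance=" + avg + " <= threshold(" + minCostNeedBalance + "): skip"
            : "weighted average imbalance=" + avg + ": generate balance plan");
    }
}
```

With every imbalance at 0.0, as in the log, the weighted average is 0.0 and the plan is skipped for each table.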
2024-11-13T18:38:00,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-13T18:38:00,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,811 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-13T18:38:00,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-13T18:38:00,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,812 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,812 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,812 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-13T18:38:00,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,813 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,813 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-13T18:38:00,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
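[Editor's note] Each skip message also names the two knobs that change the outcome: lower hbase.master.balancer.stochastic.minCostNeedBalance, or raise the multiplier of a specific cost function. A hedged sketch of setting both through the standard Hadoop Configuration API follows; the multiplier key shown for RegionCountSkewCostFunction is my assumption of the matching property name and should be verified against the HBase version in use.

```java
// Sketch of the two tuning knobs the log message points at; verify property
// names against your HBase release before relying on them.
import org.apache.hadoop.conf.Configuration;

public class BalancerTuningSketch {
    public static Configuration aggressiveBalancerConf() {
        Configuration conf = new Configuration();
        // Lower the skip threshold so smaller imbalances still trigger a plan
        // (the runs above used the threshold 1.0).
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Or weight region-count skew more heavily (multiplier 500.0 in the log);
        // this property name is an assumption, not confirmed by the log itself.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        return conf;
    }
}
```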
2024-11-13T18:38:00,813 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-13T18:38:00,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,814 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,814 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-13T18:38:00,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-13T18:38:00,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,815 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-13T18:38:00,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,816 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,816 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,816 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-13T18:38:00,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,817 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
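[Editor's note] For context on the "server N is on host N / rack 0" lines that precede every plan: BalancerClusterState is building integer indexes of servers, hosts, and racks before any cost is evaluated. A tiny illustrative sketch of that bookkeeping follows; the server names are taken from the log, everything else is hypothetical.

```java
// Illustrative only; mirrors the indexing reported by BalancerClusterState above.
import java.util.LinkedHashMap;
import java.util.Map;

public class ClusterIndexSketch {
    public static void main(String[] args) {
        String[] servers = { "srv1108834023", "srv15029278", "srv1513726762" };
        Map<String, Integer> hostIndex = new LinkedHashMap<>();   // host name -> host id
        Map<String, Integer> rackIndex = new LinkedHashMap<>();   // rack name -> rack id

        for (int server = 0; server < servers.length; server++) {
            // One host per server and a single shared rack, matching
            // "Number of tables=1, number of hosts=10, number of racks=1".
            hostIndex.putIfAbsent(servers[server], hostIndex.size());
            rackIndex.putIfAbsent("rack", rackIndex.size());
            System.out.println("server " + server + " is on host "
                + hostIndex.get(servers[server]) + " and rack " + rackIndex.get("rack"));
        }
    }
}
```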
2024-11-13T18:38:00,817 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,817 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-13T18:38:00,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,818 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-13T18:38:00,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-13T18:38:00,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,819 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,819 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,819 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,819 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-13T18:38:00,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-13T18:38:00,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
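
The StochasticLoadBalancer(421) records above all report the same decision: the weighted average imbalance (0.0) stays below the hbase.master.balancer.stochastic.minCostNeedBalance threshold (1.0), so no balance plan is generated for the table. A minimal Java sketch of that check follows, assuming the reported average is the multiplier-weighted mean of the per-function imbalances listed in the functionCost output; the class and variable names are illustrative only and are not HBase internals.

// Hypothetical illustration of the "weighted average imbalance <= threshold" check
// logged above, using the multiplier/imbalance pairs from the functionCost line.
// Functions reported as "(not needed)" are left out of the sum.
public class WeightedImbalanceSketch {
    public static void main(String[] args) {
        double[][] costs = {
            {500.0, 0.0}, // RegionCountSkewCostFunction
            {7.0,   0.0}, // MoveCostFunction
            {15.0,  0.0}, // RackLocalityCostFunction
            {35.0,  0.0}, // TableSkewCostFunction
            {5.0,   0.0}, // ReadRequestCostFunction
            {5.0,   0.0}, // WriteRequestCostFunction
            {5.0,   0.0}, // MemStoreSizeCostFunction
            {5.0,   0.0}  // StoreFileCostFunction
        };
        double weightedSum = 0.0;
        double totalWeight = 0.0;
        for (double[] c : costs) {
            weightedSum += c[0] * c[1]; // multiplier * imbalance
            totalWeight += c[0];
        }
        double avg = weightedSum / totalWeight; // 0.0 here, since every imbalance is 0.0
        double minCostNeedBalance = 1.0;        // threshold(1.0) reported in the log
        System.out.println("weighted average imbalance=" + avg
            + (avg <= minCostNeedBalance ? " <= " : " > ")
            + "threshold(" + minCostNeedBalance + ")");
        // 0.0 <= 1.0, so the balancer skips load balancing for this table.
    }
}
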
2024-11-13T18:38:00,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-13T18:38:00,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,821 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,821 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-13T18:38:00,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-13T18:38:00,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,822 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-13T18:38:00,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,823 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-13T18:38:00,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,824 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,824 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,824 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-13T18:38:00,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,824 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,824 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,824 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,824 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-13T18:38:00,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,825 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,825 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,825 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-13T18:38:00,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,826 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,826 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-13T18:38:00,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-13T18:38:00,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,827 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
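
For more aggressive balancing, the log message suggests lowering hbase.master.balancer.stochastic.minCostNeedBalance from its default of 1.0 (or raising the relevant cost-function multipliers). Below is a minimal sketch of setting that property through the standard HBase configuration API; the value 0.05 is an arbitrary example, not a recommendation from the log, and in a real deployment the property would normally be set in the master's hbase-site.xml rather than programmatically.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the tuning hinted at in the log message above: lowering
// hbase.master.balancer.stochastic.minCostNeedBalance so the stochastic
// balancer produces a plan even when the weighted average imbalance is small.
public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Print the effective value to confirm the override took effect.
        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}
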
2024-11-13T18:38:00,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-13T18:38:00,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,828 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,828 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,828 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,828 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-13T18:38:00,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-13T18:38:00,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,828 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,828 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-13T18:38:00,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,829 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-13T18:38:00,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-13T18:38:00,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:00,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-13T18:38:00,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv359018906=5, srv1108834023=0, srv1513726762=2, srv15029278=1, srv327702511=4, srv1548218407=3, srv850870472=9, srv563052408=8, srv40610417=6, srv515313338=7} racks are {rack=0} 2024-11-13T18:38:00,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:00,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:00,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:00,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:00,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-13T18:38:00,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-13T18:38:00,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2057078029=4, srv877041666=7, srv351929803=6, srv1973438433=3, srv1934106788=2, srv322341726=5, srv1595812109=0, srv1833675280=1} racks are {rack=0} 2024-11-13T18:38:00,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,832 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,832 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:38:00,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2057078029=4, srv877041666=7, srv351929803=6, srv1973438433=3, srv1934106788=2, srv322341726=5, srv1595812109=0, srv1833675280=1} racks are {rack=0} 2024-11-13T18:38:00,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,833 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,833 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,833 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:38:00,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2057078029=4, srv877041666=7, srv351929803=6, srv1973438433=3, srv1934106788=2, srv322341726=5, srv1595812109=0, srv1833675280=1} racks are {rack=0} 2024-11-13T18:38:00,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:38:00,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2057078029=4, srv877041666=7, srv351929803=6, srv1973438433=3, srv1934106788=2, srv322341726=5, srv1595812109=0, srv1833675280=1} racks are {rack=0} 2024-11-13T18:38:00,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:38:00,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2057078029=4, srv877041666=7, srv351929803=6, srv1973438433=3, srv1934106788=2, srv322341726=5, srv1595812109=0, srv1833675280=1} racks are {rack=0} 2024-11-13T18:38:00,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:38:00,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2057078029=4, srv877041666=7, srv351929803=6, srv1973438433=3, srv1934106788=2, srv322341726=5, srv1595812109=0, srv1833675280=1} racks are {rack=0} 2024-11-13T18:38:00,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,836 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,836 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,836 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:38:00,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2057078029=4, srv877041666=7, srv351929803=6, srv1973438433=3, srv1934106788=2, srv322341726=5, srv1595812109=0, srv1833675280=1} racks are {rack=0} 2024-11-13T18:38:00,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,837 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,837 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,837 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-13T18:38:00,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2057078029=4, srv877041666=7, srv351929803=6, srv1973438433=3, srv1934106788=2, srv322341726=5, srv1595812109=0, srv1833675280=1} racks are {rack=0} 2024-11-13T18:38:00,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,837 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,837 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,837 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-13T18:38:00,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2057078029=4, srv877041666=7, srv351929803=6, srv1973438433=3, srv1934106788=2, srv322341726=5, srv1595812109=0, srv1833675280=1} racks are {rack=0} 2024-11-13T18:38:00,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,838 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,838 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,838 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-13T18:38:00,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2057078029=4, srv877041666=7, srv351929803=6, srv1973438433=3, srv1934106788=2, srv322341726=5, srv1595812109=0, srv1833675280=1} racks are {rack=0} 2024-11-13T18:38:00,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,839 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-13T18:38:00,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2057078029=4, srv877041666=7, srv351929803=6, srv1973438433=3, srv1934106788=2, srv322341726=5, srv1595812109=0, srv1833675280=1} racks are {rack=0} 2024-11-13T18:38:00,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,839 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-13T18:38:00,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2057078029=4, srv877041666=7, srv351929803=6, srv1973438433=3, srv1934106788=2, srv322341726=5, srv1595812109=0, srv1833675280=1} racks are {rack=0} 2024-11-13T18:38:00,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-13T18:38:00,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2057078029=4, srv877041666=7, srv351929803=6, srv1973438433=3, srv1934106788=2, srv322341726=5, srv1595812109=0, srv1833675280=1} racks are {rack=0} 2024-11-13T18:38:00,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-13T18:38:00,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2057078029=4, srv877041666=7, srv351929803=6, srv1973438433=3, srv1934106788=2, srv322341726=5, srv1595812109=0, srv1833675280=1} racks are {rack=0} 2024-11-13T18:38:00,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,842 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-13T18:38:00,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146821847=3, srv993741953=7, srv1481899105=0, srv1689013256=2, srv707155274=4, srv946636940=6, srv870480313=5, srv1596963271=1} racks are {rack=0} 2024-11-13T18:38:00,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:38:00,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146821847=3, srv993741953=7, srv1481899105=0, srv1689013256=2, srv707155274=4, srv946636940=6, srv870480313=5, srv1596963271=1} racks are {rack=0} 2024-11-13T18:38:00,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:38:00,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146821847=3, srv993741953=7, srv1481899105=0, srv1689013256=2, srv707155274=4, srv946636940=6, srv870480313=5, srv1596963271=1} racks are {rack=0} 2024-11-13T18:38:00,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:38:00,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146821847=3, srv993741953=7, srv1481899105=0, srv1689013256=2, srv707155274=4, srv946636940=6, srv870480313=5, srv1596963271=1} racks are {rack=0} 2024-11-13T18:38:00,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
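Editor's note: before each "Generate Balance plan" pass, the DEBUG lines enumerate the cluster topology: every srvNNN name gets an index, each server sits on its own host, and all hosts share a single rack, which is why the summary reads "number of hosts=8, number of racks=1". Below is a small, hedged sketch of that bookkeeping using plain Java collections and the server names from the log; it does not reflect the real BalancerClusterState internals.

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative reconstruction of the topology summary printed by the DEBUG lines:
// one server per host in this test, and every host on the single rack 0.
public class ClusterStateSketch {
    public static void main(String[] args) {
        // Server names taken from the "Hosts are {...}" entry above.
        String[] servers = {
            "srv2146821847", "srv993741953", "srv1481899105", "srv1689013256",
            "srv707155274", "srv946636940", "srv870480313", "srv1596963271"
        };

        Map<String, Integer> serverToIndex = new LinkedHashMap<>();
        Map<Integer, Integer> serverToHost = new LinkedHashMap<>();
        Map<Integer, Integer> serverToRack = new LinkedHashMap<>();

        for (int i = 0; i < servers.length; i++) {
            serverToIndex.put(servers[i], i);
            serverToHost.put(i, i); // "server i is on host i": one server per host
            serverToRack.put(i, 0); // "server i is on rack 0": single rack
        }

        System.out.println("number of hosts="
            + serverToHost.values().stream().distinct().count()
            + ", number of racks="
            + serverToRack.values().stream().distinct().count());
    }
}
```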
2024-11-13T18:38:00,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:38:00,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146821847=3, srv993741953=7, srv1481899105=0, srv1689013256=2, srv707155274=4, srv946636940=6, srv870480313=5, srv1596963271=1} racks are {rack=0} 2024-11-13T18:38:00,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:38:00,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146821847=3, srv993741953=7, srv1481899105=0, srv1689013256=2, srv707155274=4, srv946636940=6, srv870480313=5, srv1596963271=1} racks are {rack=0} 2024-11-13T18:38:00,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:38:00,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146821847=3, srv993741953=7, srv1481899105=0, srv1689013256=2, srv707155274=4, srv946636940=6, srv870480313=5, srv1596963271=1} racks are {rack=0} 2024-11-13T18:38:00,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-13T18:38:00,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146821847=3, srv993741953=7, srv1481899105=0, srv1689013256=2, srv707155274=4, srv946636940=6, srv870480313=5, srv1596963271=1} racks are {rack=0} 2024-11-13T18:38:00,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,848 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,848 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,848 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-13T18:38:00,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146821847=3, srv993741953=7, srv1481899105=0, srv1689013256=2, srv707155274=4, srv946636940=6, srv870480313=5, srv1596963271=1} racks are {rack=0} 2024-11-13T18:38:00,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,849 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,849 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-13T18:38:00,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146821847=3, srv993741953=7, srv1481899105=0, srv1689013256=2, srv707155274=4, srv946636940=6, srv870480313=5, srv1596963271=1} racks are {rack=0} 2024-11-13T18:38:00,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,849 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,849 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-13T18:38:00,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146821847=3, srv993741953=7, srv1481899105=0, srv1689013256=2, srv707155274=4, srv946636940=6, srv870480313=5, srv1596963271=1} racks are {rack=0} 2024-11-13T18:38:00,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,850 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,850 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,850 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-13T18:38:00,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146821847=3, srv993741953=7, srv1481899105=0, srv1689013256=2, srv707155274=4, srv946636940=6, srv870480313=5, srv1596963271=1} racks are {rack=0} 2024-11-13T18:38:00,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,850 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-13T18:38:00,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146821847=3, srv993741953=7, srv1481899105=0, srv1689013256=2, srv707155274=4, srv946636940=6, srv870480313=5, srv1596963271=1} racks are {rack=0} 2024-11-13T18:38:00,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-13T18:38:00,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146821847=3, srv993741953=7, srv1481899105=0, srv1689013256=2, srv707155274=4, srv946636940=6, srv870480313=5, srv1596963271=1} racks are {rack=0} 2024-11-13T18:38:00,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-13T18:38:00,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-13T18:38:00,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-13T18:38:00,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-13T18:38:00,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-13T18:38:00,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-13T18:38:00,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-13T18:38:00,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,860 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,860 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,860 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-13T18:38:00,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-13T18:38:00,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-13T18:38:00,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-13T18:38:00,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,864 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-13T18:38:00,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,864 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,864 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-13T18:38:00,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-13T18:38:00,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-13T18:38:00,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-13T18:38:00,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-13T18:38:00,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-13T18:38:00,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-13T18:38:00,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-13T18:38:00,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-13T18:38:00,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-13T18:38:00,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,872 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-13T18:38:00,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,872 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-13T18:38:00,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-13T18:38:00,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-13T18:38:00,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-13T18:38:00,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-13T18:38:00,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-13T18:38:00,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-13T18:38:00,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,877 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,877 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-13T18:38:00,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,877 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,877 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-13T18:38:00,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-13T18:38:00,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-13T18:38:00,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-13T18:38:00,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-13T18:38:00,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-13T18:38:00,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-13T18:38:00,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-13T18:38:00,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-13T18:38:00,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-13T18:38:00,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-13T18:38:00,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,885 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-13T18:38:00,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,885 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-13T18:38:00,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-13T18:38:00,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-13T18:38:00,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-13T18:38:00,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-13T18:38:00,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-13T18:38:00,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-13T18:38:00,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,890 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,890 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,890 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-13T18:38:00,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:38:00,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,892 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:38:00,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,892 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,892 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,892 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:38:00,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,893 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,893 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,893 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:38:00,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:38:00,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:38:00,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-13T18:38:00,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-13T18:38:00,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-13T18:38:00,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-13T18:38:00,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-13T18:38:00,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-13T18:38:00,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-13T18:38:00,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-13T18:38:00,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-13T18:38:00,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-13T18:38:00,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,902 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,902 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,902 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-13T18:38:00,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,903 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,903 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-13T18:38:00,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,921 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,921 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,921 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-13T18:38:00,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,921 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,921 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,921 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-13T18:38:00,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,922 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-13T18:38:00,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,922 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-13T18:38:00,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,923 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,923 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,923 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-13T18:38:00,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,923 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,923 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,923 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-13T18:38:00,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,924 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,924 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-13T18:38:00,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,924 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,924 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-13T18:38:00,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-13T18:38:00,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-13T18:38:00,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,926 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,926 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,926 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-13T18:38:00,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,926 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,926 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,926 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-13T18:38:00,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,927 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-13T18:38:00,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,928 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,928 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,928 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-13T18:38:00,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,928 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,928 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,928 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-13T18:38:00,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,929 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,929 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,929 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-13T18:38:00,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,929 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,929 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,929 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-13T18:38:00,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,929 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,929 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-13T18:38:00,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,930 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-13T18:38:00,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,930 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-13T18:38:00,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-13T18:38:00,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,932 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-13T18:38:00,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,933 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,933 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,933 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-13T18:38:00,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,933 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,933 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,933 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-13T18:38:00,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,934 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,934 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,934 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-13T18:38:00,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,934 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,934 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,934 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-13T18:38:00,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,935 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,935 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-13T18:38:00,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,935 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,935 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-13T18:38:00,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,935 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,935 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-13T18:38:00,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-13T18:38:00,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-13T18:38:00,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-13T18:38:00,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-13T18:38:00,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-13T18:38:00,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-13T18:38:00,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-13T18:38:00,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,940 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-13T18:38:00,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,940 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-13T18:38:00,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,941 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-13T18:38:00,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,941 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-13T18:38:00,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,941 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,942 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,942 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-13T18:38:00,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,942 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,942 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-13T18:38:00,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,942 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,942 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-13T18:38:00,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-13T18:38:00,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-13T18:38:00,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,944 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-13T18:38:00,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,944 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-13T18:38:00,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,945 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-13T18:38:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,945 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-13T18:38:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-13T18:38:00,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-13T18:38:00,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-13T18:38:00,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-13T18:38:00,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-13T18:38:00,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-13T18:38:00,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-13T18:38:00,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-13T18:38:00,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-13T18:38:00,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-13T18:38:00,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-13T18:38:00,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-13T18:38:00,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1650766557=2, srv1742216243=3, srv248294840=5, srv1131823262=0, srv415835586=7, srv1350405186=1, srv2121726398=4, srv276676612=6} racks are {rack=0} 2024-11-13T18:38:00,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-13T18:38:00,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-13T18:38:00,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-13T18:38:00,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,961 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,961 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-13T18:38:00,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,961 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,961 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-13T18:38:00,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
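[editor's note] Each block above is prefixed "Table specific (tableNNN)", i.e. the balancer is producing one plan per table rather than one cluster-wide plan. A short sketch of enabling that mode, assuming the standard hbase.master.loadbalance.bytable switch is what controls it; verify the key against your HBase version.

    // Sketch: turn on per-table balancing so the balancer emits one plan per table, as seen above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ByTableBalancingSketch {
      public static Configuration enableByTableBalancing() {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.master.loadbalance.bytable", true);
        return conf;
      }
    }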
2024-11-13T18:38:00,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,963 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-13T18:38:00,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,963 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-13T18:38:00,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-13T18:38:00,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-13T18:38:00,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,965 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,965 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-13T18:38:00,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,965 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,965 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-13T18:38:00,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,965 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-13T18:38:00,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-13T18:38:00,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-13T18:38:00,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-13T18:38:00,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-13T18:38:00,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,969 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-13T18:38:00,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,969 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-13T18:38:00,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,970 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-13T18:38:00,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,970 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-13T18:38:00,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-13T18:38:00,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-13T18:38:00,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,972 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,972 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-13T18:38:00,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,972 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,972 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-13T18:38:00,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-13T18:38:00,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-13T18:38:00,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,974 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-13T18:38:00,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,975 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-13T18:38:00,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,975 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-13T18:38:00,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,976 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-13T18:38:00,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,976 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-13T18:38:00,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-13T18:38:00,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-13T18:38:00,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,978 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,978 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-13T18:38:00,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,978 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,978 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-13T18:38:00,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-13T18:38:00,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-13T18:38:00,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,980 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,981 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:38:00,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,981 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,981 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,981 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:38:00,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,982 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,982 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,982 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:38:00,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,982 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,982 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,982 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:38:00,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:38:00,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:38:00,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-13T18:38:00,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-13T18:38:00,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-13T18:38:00,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-13T18:38:00,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-13T18:38:00,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,986 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-13T18:38:00,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,986 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-13T18:38:00,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,986 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-13T18:38:00,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-13T18:38:00,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-13T18:38:00,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-13T18:38:00,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,989 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,989 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,989 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-13T18:38:00,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table130 2024-11-13T18:38:00,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-13T18:38:00,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,991 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,991 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table131 2024-11-13T18:38:00,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:00,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:00,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:00,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:00,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:00,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:00,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:00,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:00,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:00,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:00,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:00,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:00,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:00,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:00,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:00,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:00,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:00,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:00,999 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:00,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:00,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table132 2024-11-13T18:38:01,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table133 2024-11-13T18:38:01,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table134 2024-11-13T18:38:01,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table135 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
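Annotation: every plan-generation attempt above rebuilds the same BalancerClusterState indexing: eight servers, each counted as its own host, all assigned to the single rack "rack=0". A small illustrative sketch of that server-to-host-to-rack indexing follows; the class and variable names are invented for illustration and are not the actual HBase members.

import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative reconstruction of the indexing logged by BalancerClusterState:
// "server N is on host N" and "server N is on rack 0" for all eight servers.
public class ClusterIndexSketch {
  public static void main(String[] args) {
    String[] servers = {"srv1058605599", "srv1454627838", "srv1831716129", "srv1903658778",
        "srv1938052722", "srv244350586", "srv252271985", "srv936549501"};

    Map<String, Integer> hostIndex = new LinkedHashMap<>();
    int[] serverToHost = new int[servers.length];
    int[] serverToRack = new int[servers.length];

    for (int i = 0; i < servers.length; i++) {
      // In this test every server name is a distinct host, so host index == server index.
      serverToHost[i] = hostIndex.computeIfAbsent(servers[i], h -> hostIndex.size());
      serverToRack[i] = 0; // single rack, matching "racks are {rack=0}"
      System.out.println("server " + i + " is on host " + serverToHost[i]
          + " and rack " + serverToRack[i]);
    }
    System.out.println("Number of tables=1, number of hosts=" + hostIndex.size()
        + ", number of racks=1");
  }
}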
2024-11-13T18:38:01,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table136 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table137 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table138 2024-11-13T18:38:01,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table139 2024-11-13T18:38:01,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-13T18:38:01,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-13T18:38:01,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-13T18:38:01,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-13T18:38:01,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-13T18:38:01,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,008 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,008 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-13T18:38:01,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,009 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,009 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,009 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-13T18:38:01,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,010 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,010 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-13T18:38:01,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,010 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,010 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-13T18:38:01,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-13T18:38:01,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-13T18:38:01,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,012 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,012 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,012 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-13T18:38:01,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,012 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,013 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,013 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-13T18:38:01,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,013 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,013 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,013 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-13T18:38:01,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,014 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,014 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,014 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-13T18:38:01,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,014 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,014 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,014 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-13T18:38:01,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,014 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,014 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,015 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,015 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,015 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,015 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,015 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-13T18:38:01,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-13T18:38:01,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,018 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,018 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,018 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-13T18:38:01,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,018 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,018 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,018 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-13T18:38:01,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,019 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,019 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,019 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-13T18:38:01,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-13T18:38:01,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-13T18:38:01,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,021 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,021 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,021 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-13T18:38:01,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-13T18:38:01,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-13T18:38:01,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
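Each skip decision compares a "weighted average imbalance" against the threshold of 1.0. A minimal sketch of that comparison, assuming (not taken from the StochasticLoadBalancer source) that the figure is the multiplier-weighted mean of the per-function imbalances over the functions reported as needed; with every imbalance at 0.0, as in the cycles above, the result is 0.0 and no plan is generated.

```java
public class WeightedImbalanceSketch {
  static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
    double weighted = 0.0;
    double totalWeight = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weighted += multipliers[i] * imbalances[i];
      totalWeight += multipliers[i];
    }
    return totalWeight == 0.0 ? 0.0 : weighted / totalWeight;
  }

  public static void main(String[] args) {
    // Multipliers as reported in the log lines above; all imbalances there are 0.0.
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances = new double[multipliers.length];
    System.out.println(weightedAverageImbalance(multipliers, imbalances)); // prints 0.0
  }
}
```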
2024-11-13T18:38:01,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-13T18:38:01,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-13T18:38:01,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-13T18:38:01,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-13T18:38:01,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-13T18:38:01,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-13T18:38:01,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,028 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,028 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,028 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-13T18:38:01,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,028 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,028 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,028 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-13T18:38:01,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-13T18:38:01,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-13T18:38:01,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,030 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,030 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-13T18:38:01,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-13T18:38:01,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
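Every skip message lists the cost functions with their multipliers and per-function imbalances. Assuming the reported "weighted average imbalance" is the multiplier-weighted mean of those imbalances (an inference from the log wording, not a quote of HBase internals), the zero result here follows directly, since every listed imbalance is 0.0. A small standalone sketch that recomputes the figure from the logged pairs:

```java
// Hypothetical helper, not HBase code: recomputes the "weighted average imbalance"
// from the (multiplier, imbalance) pairs printed after functionCost= in the log.
// The weighting scheme is an assumption inferred from the log message.
public class WeightedImbalance {
  static double weightedAverage(double[][] pairs) { // each pair: {multiplier, imbalance}
    double weighted = 0.0, totalMultiplier = 0.0;
    for (double[] p : pairs) {
      weighted += p[0] * p[1];
      totalMultiplier += p[0];
    }
    return totalMultiplier == 0.0 ? 0.0 : weighted / totalMultiplier;
  }

  public static void main(String[] args) {
    // Multipliers and imbalances copied from the table30..table34 entries above;
    // functions reported as "(not needed)" are omitted since no imbalance is printed.
    double[][] pairs = {
      {500.0, 0.0}, {7.0, 0.0}, {15.0, 0.0}, {35.0, 0.0},
      {5.0, 0.0}, {5.0, 0.0}, {5.0, 0.0}, {5.0, 0.0}
    };
    // Prints 0.0, consistent with "weighted average imbalance=0.0 <= threshold(1.0)".
    System.out.println(weightedAverage(pairs));
  }
}
```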
2024-11-13T18:38:01,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-13T18:38:01,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,032 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-13T18:38:01,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,033 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-13T18:38:01,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,034 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-13T18:38:01,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,034 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-13T18:38:01,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,035 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,035 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,035 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-13T18:38:01,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,035 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,035 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,035 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-13T18:38:01,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,036 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,036 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,036 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-13T18:38:01,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,037 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:01,037 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,037 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-13T18:38:01,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1058605599=0, srv252271985=6, srv936549501=7, srv1454627838=1, srv1831716129=2, srv244350586=5, srv1903658778=3, srv1938052722=4} racks are {rack=0} 2024-11-13T18:38:01,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-13T18:38:01,037 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
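The balance-plan generation in this log is driven by the test harness ("Time-limited test"); on a live cluster the equivalent decision is requested through the public Admin API. A minimal, hedged sketch of that operator-side call (unrelated to this unit test; connection settings are assumed to come from hbase-site.xml on the classpath):

```java
// Minimal sketch: ask a running HMaster for one balancer pass via the Admin API.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RunBalancerOnce {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Returns false when, as in the log above, the balancer decides nothing needs to move.
      boolean ran = admin.balance();
      System.out.println("balancer ran: " + ran);
    }
  }
}
```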
2024-11-13T18:38:01,037 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:38:01,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1778642134=1, srv418710834=2, srv557372639=3, srv815250937=4, srv1763138750=0} racks are {rack=0} 2024-11-13T18:38:01,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-13T18:38:01,038 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,038 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:38:01,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1778642134=1, srv418710834=2, srv557372639=3, srv815250937=4, srv1763138750=0} racks are {rack=0} 2024-11-13T18:38:01,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-13T18:38:01,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:38:01,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1778642134=1, srv418710834=2, srv557372639=3, srv815250937=4, srv1763138750=0} racks are {rack=0} 2024-11-13T18:38:01,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-13T18:38:01,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:38:01,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1778642134=1, srv418710834=2, srv557372639=3, srv815250937=4, srv1763138750=0} racks are {rack=0} 2024-11-13T18:38:01,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-13T18:38:01,040 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,040 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,040 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:38:01,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1778642134=1, srv418710834=2, srv557372639=3, srv815250937=4, srv1763138750=0} racks are {rack=0} 2024-11-13T18:38:01,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-13T18:38:01,040 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,040 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,073 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-13T18:38:01,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,076 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,079 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,081 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,083 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,087 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,087 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,087 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-13T18:38:01,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,088 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,092 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,097 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,102 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
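[Editor's note] The INFO message above states the decision rule: the balancer skips a table when the weighted average imbalance stays at or below hbase.master.balancer.stochastic.minCostNeedBalance (1.0 here), and suggests either lowering that threshold or raising a cost function's multiplier. A minimal sketch follows, assuming the standard Hadoop Configuration API; the multiplier property name (hbase.master.balancer.stochastic.regionCountCost) is an assumption inferred from the RegionCountSkewCostFunction default of 500.0 shown in the functionCost line below, not something confirmed by this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static void main(String[] args) {
        // Start from the usual HBase configuration (hbase-site.xml on the classpath).
        Configuration conf = HBaseConfiguration.create();

        // Option 1 from the log message: lower the minimum cost that triggers balancing.
        // The log reports a default of 1.0; a smaller value makes the balancer act on smaller imbalances.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.025f);

        // Option 2 from the log message: raise the relative multiplier of a specific cost function,
        // e.g. region count skew (assumed property name; the log shows its multiplier=500.0 by default).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

        System.out.println("minCostNeedBalance = "
            + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}

On a live cluster these properties would normally be set in hbase-site.xml on the HMaster rather than programmatically; the snippet only illustrates which knobs the log message is referring to.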
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,102 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-13T18:38:01,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,104 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,107 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,108 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,113 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
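Note: the balancer skipped table15 because the weighted average imbalance (0.0) did not exceed hbase.master.balancer.stochastic.minCostNeedBalance (1.0), exactly as the INFO line above advises. A minimal sketch of the tuning that message suggests, assuming a standard HBase client Configuration; the 0.05 threshold and the regionCountCost property/value are illustrative assumptions, not values taken from this log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class LowerBalancerThreshold {
    public static void main(String[] args) {
        // Start from the usual HBase configuration (hbase-site.xml on the classpath).
        Configuration conf = HBaseConfiguration.create();

        // The log reports minCostNeedBalance=1.0, so any weighted imbalance <= 1.0 is skipped.
        // Lowering it makes the StochasticLoadBalancer act on smaller imbalances.
        // 0.05 is only an example value.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Alternative mentioned in the log message: raise the relative multiplier of a
        // specific cost function. Property name assumed here for the region count skew
        // cost (its multiplier shows as 500.0 in the functionCost line below).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}
```

In a live cluster these properties would normally be set in hbase-site.xml on the master rather than programmatically; the snippet only shows which keys the log message is referring to.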
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-13T18:38:01,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,121 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,125 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,128 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,131 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,135 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,136 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,136 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,136 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-13T18:38:01,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,138 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,143 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,148 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,153 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,153 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-13T18:38:01,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,155 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
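[Editor's note] The StochasticLoadBalancer decision logged above for table17 ("skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0)") follows from the functionCost breakdown: each cost function reports an imbalance, the imbalances are weighted by their multipliers, and balancing is skipped while the weighted average stays at or below hbase.master.balancer.stochastic.minCostNeedBalance. The sketch below is illustrative only, not HBase's implementation; the class and method names are invented here, and the multipliers/imbalances are copied from the functionCost line above.

```java
// Illustrative sketch only -- not the actual StochasticLoadBalancer code.
// It mirrors the decision logged above: weight each cost function's imbalance
// by its multiplier and skip balancing when the weighted average is at or
// below hbase.master.balancer.stochastic.minCostNeedBalance (default 1.0).
import java.util.LinkedHashMap;
import java.util.Map;

public class MinCostNeedBalanceSketch {

    /** Weighted average of the imbalances, using the multipliers as weights. */
    static double weightedAverageImbalance(Map<String, double[]> costs) {
        double weightedSum = 0.0;
        double totalWeight = 0.0;
        for (double[] multiplierAndImbalance : costs.values()) {
            double multiplier = multiplierAndImbalance[0];
            double imbalance = multiplierAndImbalance[1];
            weightedSum += multiplier * imbalance;
            totalWeight += multiplier;
        }
        return totalWeight == 0.0 ? 0.0 : weightedSum / totalWeight;
    }

    public static void main(String[] args) {
        // {multiplier, imbalance} pairs copied from the functionCost line above.
        Map<String, double[]> costs = new LinkedHashMap<>();
        costs.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        costs.put("MoveCostFunction",            new double[] {7.0,   0.0});
        costs.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
        costs.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
        costs.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
        costs.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
        costs.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
        costs.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

        // hbase.master.balancer.stochastic.minCostNeedBalance
        double minCostNeedBalance = 1.0;
        double imbalance = weightedAverageImbalance(costs);

        if (imbalance <= minCostNeedBalance) {
            System.out.printf("skipping load balancing: imbalance=%.1f <= threshold(%.1f)%n",
                imbalance, minCostNeedBalance);
        } else {
            System.out.println("would generate a balance plan");
        }
    }
}
```

Lowering minCostNeedBalance (or raising the multiplier of a specific cost function) makes the comparison trip sooner, which is exactly what the log message suggests for more aggressive balancing. [End editor's note]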
2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,159 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
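[Editor's note] The long "server N is on host N" / "server N is on rack 0" runs in this trace come from BalancerClusterState assigning each server an integer index and recording which host and rack index it maps to; with one server per host and a single rack, the host index equals the server index and the rack index is always 0, which is what the log shows. The following is a minimal, hypothetical sketch of that style of indexing (not the HBase code; class and helper names are invented):

```java
// Hedged sketch (not HBase code): index servers by host and rack, producing
// output in the spirit of the "server N is on host M" lines above.
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class ClusterIndexSketch {

    /** Return the index of value in list, appending it first if absent. */
    static int indexOfOrAdd(List<String> list, String value) {
        int idx = list.indexOf(value);
        if (idx >= 0) {
            return idx;
        }
        list.add(value);
        return list.size() - 1;
    }

    public static void main(String[] args) {
        // Hypothetical topology: server name -> {host, rack}. One server per
        // host and a single shared rack, matching the test cluster above.
        Map<String, String[]> topology = new LinkedHashMap<>();
        for (int i = 0; i < 5; i++) {
            topology.put("srv" + i, new String[] {"host" + i, "rack"});
        }

        List<String> hosts = new ArrayList<>();
        List<String> racks = new ArrayList<>();
        int server = 0;
        for (String[] hostAndRack : topology.values()) {
            int hostIdx = indexOfOrAdd(hosts, hostAndRack[0]);
            int rackIdx = indexOfOrAdd(racks, hostAndRack[1]);
            System.out.printf("server %d is on host %d%n", server, hostIdx);
            System.out.printf("server %d is on rack %d%n", server, rackIdx);
            server++;
        }
        System.out.printf("number of hosts=%d, number of racks=%d%n",
            hosts.size(), racks.size());
    }
}
```

Run as-is, the sketch prints host indices equal to the server indices and rack 0 throughout, mirroring the enumeration in this log. [End editor's note]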
2024-11-13T18:38:01,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,166 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
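The INFO entry just above (together with the functionCost breakdown that continues on the next line) records why balancing is skipped for table18: the weighted average of the per-cost-function imbalances did not exceed hbase.master.balancer.stochastic.minCostNeedBalance, which is 1.0 in this run. As a rough illustration of that check only (a minimal standalone sketch, not HBase's actual StochasticLoadBalancer code; the class name, the helper method, and the exact averaging rule below are assumptions, while the multipliers, imbalances, and the 1.0 threshold are taken from the log), the reported values can be combined like this:

```java
// Illustrative sketch: approximates the "weighted average imbalance <= threshold" decision
// seen in the log. Not the real StochasticLoadBalancer implementation.
import java.util.LinkedHashMap;
import java.util.Map;

public class BalanceDecisionSketch {

    /** Weighted average of per-cost-function imbalances (assumed combination rule). */
    static double weightedAverageImbalance(Map<String, double[]> costs) {
        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (double[] mi : costs.values()) {   // mi[0] = multiplier, mi[1] = imbalance
            weightedSum += mi[0] * mi[1];
            multiplierSum += mi[0];
        }
        return multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;
    }

    public static void main(String[] args) {
        // Multipliers and imbalances as reported in the functionCost line for table18.
        Map<String, double[]> costs = new LinkedHashMap<>();
        costs.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        costs.put("MoveCostFunction",            new double[] {7.0,   0.0});
        costs.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
        costs.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
        costs.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
        costs.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
        costs.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
        costs.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

        // hbase.master.balancer.stochastic.minCostNeedBalance is 1.0 in this run per the log;
        // lowering it makes the balancer act on smaller imbalances, as the message suggests.
        double minCostNeedBalance = 1.0;

        double imbalance = weightedAverageImbalance(costs);
        boolean skipBalancing = imbalance <= minCostNeedBalance;
        System.out.printf("weighted average imbalance=%.1f, threshold=%.1f, skip=%b%n",
                imbalance, minCostNeedBalance, skipBalancing);
    }
}
```

With every imbalance at 0.0 the weighted average is 0.0, which is at or below the 1.0 threshold, so the balancer skips generating a plan for table18 and moves on to table19, as the following entries show.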
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-13T18:38:01,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,173 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,177 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,181 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,184 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,184 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,184 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,184 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,185 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,188 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,191 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,194 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-13T18:38:01,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-13T18:38:01,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-13T18:38:01,195 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-13T18:38:01,195 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
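The INFO entry above is the balancer's skip decision for table50: it compares a weighted average of the per-cost-function imbalances against hbase.master.balancer.stochastic.minCostNeedBalance (1.0 here) and only generates a balance plan when the average exceeds that threshold. The sketch below is a minimal, standalone illustration of that arithmetic, assuming the average is simply sum(multiplier * imbalance) / sum(multiplier) over the cost functions that report a multiplier; the class name is made up for this example, and the real StochasticLoadBalancer bookkeeping may differ. The (multiplier, imbalance) pairs are copied from the functionCost= breakdown that follows this block; functions reported as "(not needed)" are left out. As the message suggests, more aggressive balancing comes from either lowering hbase.master.balancer.stochastic.minCostNeedBalance (an hbase-site.xml property) or raising the multiplier of the cost function that matters to you.

// Hypothetical sketch (not HBase code) of the threshold check implied by the log entry above.
public class MinCostNeedBalanceSketch {
    public static void main(String[] args) {
        // (multiplier, imbalance) pairs taken from the functionCost= breakdown for table50;
        // PrimaryRegionCountSkew/RegionReplicaHost/RegionReplicaRack are "(not needed)" and omitted.
        double[][] functionCost = {
            {500.0, 0.0}, // RegionCountSkewCostFunction
            {7.0,   0.0}, // MoveCostFunction
            {15.0,  0.0}, // RackLocalityCostFunction
            {35.0,  0.0}, // TableSkewCostFunction
            {5.0,   0.0}, // ReadRequestCostFunction
            {5.0,   0.0}, // WriteRequestCostFunction
            {5.0,   0.0}, // MemStoreSizeCostFunction
            {5.0,   0.0}  // StoreFileCostFunction
        };
        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (double[] fc : functionCost) {
            weightedSum += fc[0] * fc[1];
            multiplierSum += fc[0];
        }
        // Assumed definition of "weighted average imbalance": weighted mean of the imbalances.
        double weightedAverageImbalance = weightedSum / multiplierSum;
        double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance
        if (weightedAverageImbalance <= minCostNeedBalance) {
            System.out.println("skipping load balancing because weighted average imbalance="
                + weightedAverageImbalance + " <= threshold(" + minCostNeedBalance + ")");
        } else {
            System.out.println("imbalance above threshold, a balance plan would be generated");
        }
    }
}

With every reported imbalance at 0.0, the weighted average is trivially 0.0, so the skip for table50 (and the analogous decisions for the other tables in this run) follows directly from the check above.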
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,195 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-13T18:38:01,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,196 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,196 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,199 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,202 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,205 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-13T18:38:01,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-13T18:38:01,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-13T18:38:01,206 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-13T18:38:01,206 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
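The record above names the knob that gated this run: the balancer only acts when the weighted average imbalance exceeds hbase.master.balancer.stochastic.minCostNeedBalance (1.0 here, so balancing is skipped). Below is a minimal sketch of overriding that property from Java code, assuming a master-side Configuration is being built; the 0.05 value is illustrative only and is not taken from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static Configuration tunedConf() {
        // Start from the default HBase configuration (hbase-site.xml on the classpath).
        Configuration conf = HBaseConfiguration.create();
        // Lower the threshold so the StochasticLoadBalancer acts on smaller imbalances.
        // 0.05 is an illustrative value, not the value used in this test run.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Alternatively, the per-cost-function multipliers listed in the functionCost=
        // record that follows can be raised via their own
        // hbase.master.balancer.stochastic.* properties (exact names not shown here).
        return conf;
      }
    }

The same property can equally be set in hbase-site.xml on the master; this snippet only mirrors the tuning the log message itself suggests. The functionCost= breakdown for table51 follows.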
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,206 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-13T18:38:01,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,207 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,210 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,213 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,215 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-13T18:38:01,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,225 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,229 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,233 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,238 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-13T18:38:01,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-13T18:38:01,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-13T18:38:01,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-13T18:38:01,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-13T18:38:01,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-13T18:38:01,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-13T18:38:01,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-13T18:38:01,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-13T18:38:01,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-13T18:38:01,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-13T18:38:01,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-13T18:38:01,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-13T18:38:01,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-13T18:38:01,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
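[Editor's note] The per-function multipliers and imbalances behind this "skipping load balancing" decision are listed in the functionCost breakdown on the next line. As a rough illustration only (this is NOT HBase's actual StochasticLoadBalancer code; the class, method, and data layout below are made-up assumptions), the following Java sketch shows how a multiplier-weighted average imbalance could be compared against a minCostNeedBalance-style threshold, using the values logged for table53. Functions logged as "(not needed)" are omitted.

// BalanceDecisionSketch.java -- illustrative sketch, not HBase source.
import java.util.LinkedHashMap;
import java.util.Map;

public class BalanceDecisionSketch {

    /** Weighted average of imbalances, assuming each function's multiplier is its weight. */
    static double weightedAverageImbalance(Map<String, double[]> functions) {
        double weightedSum = 0.0;
        double totalWeight = 0.0;
        for (double[] multiplierAndImbalance : functions.values()) {
            double multiplier = multiplierAndImbalance[0];
            double imbalance = multiplierAndImbalance[1];
            weightedSum += multiplier * imbalance;
            totalWeight += multiplier;
        }
        return totalWeight == 0.0 ? 0.0 : weightedSum / totalWeight;
    }

    public static void main(String[] args) {
        // {multiplier, imbalance} pairs taken from the functionCost breakdown logged for table53.
        Map<String, double[]> functions = new LinkedHashMap<>();
        functions.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        functions.put("MoveCostFunction",            new double[] {7.0,   0.0});
        functions.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
        functions.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
        functions.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
        functions.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
        functions.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
        functions.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

        // Default of hbase.master.balancer.stochastic.minCostNeedBalance, per the log message above.
        double minCostNeedBalance = 1.0;
        double imbalance = weightedAverageImbalance(functions);

        if (imbalance <= minCostNeedBalance) {
            System.out.printf("skipping load balancing: imbalance=%.1f <= threshold(%.1f)%n",
                    imbalance, minCostNeedBalance);
        } else {
            System.out.println("would generate a balance plan");
        }
    }
}

Run as-is, the sketch takes the same "skipping" branch seen in the log, since every logged imbalance is 0.0; lowering the threshold or raising a multiplier only matters once some imbalance is non-zero, which is what the log message above is pointing at.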
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,239 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-13T18:38:01,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,241 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,245 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,250 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-13T18:38:01,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
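[Editor's note] The table10 decision above compares a "weighted average imbalance" against hbase.master.balancer.stochastic.minCostNeedBalance (1.0 here). The short sketch below is plain illustrative Java, not HBase source: it assumes the reported number is the multiplier-weighted mean of the per-function imbalances printed in the functionCost line above. The class name, the hard-coded values, and the configuration key usage at the end are taken from or inferred for this log only.

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class WeightedImbalanceSketch {
        public static void main(String[] args) {
            // {multiplier, imbalance} pairs copied from the functionCost line above;
            // cost functions reported as "(not needed)" are omitted.
            Map<String, double[]> costs = new LinkedHashMap<>();
            costs.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
            costs.put("MoveCostFunction",            new double[] {7.0,   0.0});
            costs.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
            costs.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
            costs.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
            costs.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
            costs.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
            costs.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

            double weighted = 0.0;
            double multipliers = 0.0;
            for (double[] mi : costs.values()) {
                weighted += mi[0] * mi[1];   // multiplier * imbalance
                multipliers += mi[0];
            }
            // Assumed formula: weighted average imbalance
            //   = sum(multiplier * imbalance) / sum(multiplier)
            double weightedAverageImbalance =
                multipliers == 0.0 ? 0.0 : weighted / multipliers;

            // The INFO line skips balancing because this value is <= the threshold.
            double minCostNeedBalance = 1.0;
            System.out.println("weighted average imbalance = " + weightedAverageImbalance);
            System.out.println("balancing needed = "
                + (weightedAverageImbalance > minCostNeedBalance));
        }
    }

With every imbalance at 0.0 the weighted average is 0.0, which matches the "skipping load balancing" decision logged above. To make the balancer act on smaller imbalances, the message suggests either lowering hbase.master.balancer.stochastic.minCostNeedBalance below 1.0 (for example via conf.set("hbase.master.balancer.stochastic.minCostNeedBalance", "0.05"); the 0.05 value is only an example, not taken from this log) or raising the multiplier of the cost function of interest.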
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,257 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,261 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,265 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,272 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,275 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-13T18:38:01,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-13T18:38:01,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-13T18:38:01,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-13T18:38:01,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-13T18:38:01,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-13T18:38:01,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-13T18:38:01,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-13T18:38:01,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-13T18:38:01,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-13T18:38:01,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-13T18:38:01,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-13T18:38:01,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-13T18:38:01,275 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-13T18:38:01,275 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,275 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-13T18:38:01,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,277 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
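The StochasticLoadBalancer message for table54 above names two tuning knobs: lowering hbase.master.balancer.stochastic.minCostNeedBalance below the logged threshold of 1.0, or raising the relative multiplier of a specific cost function from the functionCost list. Below is a minimal, hypothetical sketch (not part of this log) of setting those knobs on an HBase Configuration. The minCostNeedBalance key is taken verbatim from the log message; the regionCountCost key and the weighted-average formula in the comments are my assumptions, not something the log states.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hypothetical tuning sketch for the StochasticLoadBalancer decision logged above.
public class BalancerThresholdTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Lower the "needs balance" threshold (logged as threshold(1.0)) so that
    // smaller weighted imbalances still trigger a balance plan.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Alternatively, raise the relative weight of one cost function. The key below is
    // an assumption; it would correspond to the multiplier=500.0 shown for
    // RegionCountSkewCostFunction in the functionCost breakdown.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

    // As I read the log, the "weighted average imbalance" compared against the threshold
    // is sum(multiplier_i * imbalance_i) / sum(multiplier_i) over the listed cost functions.
    System.out.println("minCostNeedBalance = "
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
  }
}
```

In a test run like this one, such a Configuration would be handed to the balancer before it computes plans; with imbalance=0.0 on every cost function, though, no threshold setting would produce moves for table54.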
2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,281 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,283 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,285 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,289 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-13T18:38:01,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,291 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
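[Editor's note] The StochasticLoadBalancer(421) message earlier in this section reports that balancing is skipped while the weighted average imbalance stays at or below hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this test run), and suggests lowering that threshold or raising individual cost-function multipliers for more aggressive balancing. As a minimal, hypothetical sketch of how that threshold could be lowered programmatically (the property name is taken from the log message itself; the 0.05 value and the use of HBaseConfiguration here are illustrative assumptions, not something this test actually does):

    // Hypothetical tuning sketch: lower the min-cost threshold so the
    // stochastic balancer considers balance plans even for small imbalances.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // The log above shows a threshold of 1.0 for this run; 0.05 is an assumed example value.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
            System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
        }
    }

On a live cluster the same property would typically be set in hbase-site.xml rather than in code; that deployment detail is an assumption and is not shown anywhere in this log.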
2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,296 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,297 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,302 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,307 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,307 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
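The advisory in the log entry above refers to standard HBase balancer tuning properties. A minimal sketch, assuming the stock org.apache.hadoop.hbase.HBaseConfiguration API, of how those knobs could be set; the property for the cost-function multiplier and the concrete values (0.05f, 1000.0f) are illustrative assumptions, not recommendations from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static void main(String[] args) {
            // Loads hbase-default.xml / hbase-site.xml from the classpath.
            Configuration conf = HBaseConfiguration.create();

            // Lower the "min cost need balance" threshold named in the log message
            // above (1.0 in this run) so the balancer acts on smaller imbalances.
            // 0.05f is an illustrative assumption, not a recommended value.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

            // Alternatively, raise the weight of one cost function, e.g. the region
            // count skew multiplier (500.0 in the functionCost dump that follows).
            // The property name here is an assumption based on HBase conventions.
            conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000.0f);

            System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
        }
    }

In a deployed cluster the same properties would normally be set in hbase-site.xml on the master rather than programmatically; the snippet only illustrates which configuration keys the log message is pointing at.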
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,307 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-13T18:38:01,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,310 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,314 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,316 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,319 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
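The message above names the two tuning knobs it is referring to: hbase.master.balancer.stochastic.minCostNeedBalance (the skip threshold, reported here as 1.0) and the per-cost-function multipliers echoed in the functionCost listing that continues below. A minimal sketch of how those knobs could be set programmatically follows; in a real deployment they would normally be placed in hbase-site.xml on the master. The regionCountCost key name is an assumption inferred from the RegionCountSkewCostFunction default multiplier of 500.0 and is not taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Lower the "skip balancing" threshold reported as threshold(1.0) in the
        // log line above, so smaller weighted-average imbalances still trigger a plan.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Assumed key: raise the relative weight of region-count skew (the log shows
        // RegionCountSkewCostFunction with its default multiplier of 500.0).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        System.out.println("minCostNeedBalance = "
            + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
    }
}

The functionCost breakdown for table12 from the same log record continues below.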
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-13T18:38:01,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,327 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,331 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,334 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,336 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,342 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,342 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,342 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-13T18:38:01,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,344 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
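For reference, the skip decision logged for table46 earlier weights each cost function's imbalance by its multiplier. The following self-contained sketch redoes that arithmetic with the multipliers and imbalances copied from the functionCost line; the averaging formula itself is an assumption based on the "weighted average imbalance" wording, not the exact HBase implementation.

```java
// Hypothetical re-computation of the "weighted average imbalance" from the
// functionCost line for table46. Multipliers/imbalances are copied from the log;
// the averaging formula is an assumption for illustration.
public class WeightedImbalanceSketch {
  public static void main(String[] args) {
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances  = {  0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0, 0.0};
    double weighted = 0.0, totalWeight = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weighted += multipliers[i] * imbalances[i];
      totalWeight += multipliers[i];
    }
    double weightedAverage = totalWeight == 0.0 ? 0.0 : weighted / totalWeight;
    double threshold = 1.0; // "threshold(1.0)" in the log
    // 0.0 <= 1.0, so the balancer skips table46 exactly as logged.
    System.out.printf("weighted average imbalance=%.1f <= threshold(%.1f): skip=%b%n",
        weightedAverage, threshold, weightedAverage <= threshold);
  }
}
```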
2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,348 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
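The long run of "server N is on host N" and "server N is on rack 0" records is BalancerClusterState materializing its topology indexes for a 393-server, single-rack test cluster ("Number of tables=1, number of hosts=393, number of racks=1"). A hypothetical stand-alone sketch of that shape follows; the array names are invented for the illustration and are not the real BalancerClusterState internals.

```java
// Illustrative model of the topology these log lines describe: 393 servers,
// each on its own host, all on a single rack. Names are invented for the sketch.
public class ClusterTopologySketch {
  public static void main(String[] args) {
    int numServers = 393;           // "number of hosts=393" in the log
    int[] serverToHost = new int[numServers];
    int[] serverToRack = new int[numServers];
    for (int s = 0; s < numServers; s++) {
      serverToHost[s] = s;          // "server N is on host N"
      serverToRack[s] = 0;          // "server N is on rack 0"
    }
    System.out.println("hosts=" + numServers + ", racks=1");
  }
}
```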
2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,350 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,363 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,368 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,369 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-13T18:38:01,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,371 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,375 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,380 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,381 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,387 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,387 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
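[Editor's note] The StochasticLoadBalancer message above is the main tuning signal in this run: balancing for table48 is skipped because the weighted average imbalance (the multiplier-weighted mean of the per-cost-function imbalances listed in the functionCost breakdown immediately below) does not exceed hbase.master.balancer.stochastic.minCostNeedBalance. A minimal sketch of how one might make the balancer more aggressive, assuming the standard Hadoop Configuration API; the minCostNeedBalance key is taken verbatim from the log, while the regionCountCost key is an assumption about the RegionCountSkewCostFunction multiplier shown below:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    // Start from the cluster's hbase-site.xml / built-in defaults.
    Configuration conf = HBaseConfiguration.create();

    // Lower the threshold reported in the log (1.0 in this run) so the
    // balancer acts on smaller weighted-average imbalances.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Alternatively, raise a specific cost function's relative weight.
    // Key name is an assumption for the RegionCountSkewCostFunction
    // multiplier (reported as multiplier=500.0 in the functionCost line).
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000.0f);

    // Illustration only: recompute a weighted average imbalance the way the
    // log reports it, from (multiplier, imbalance) pairs; this mirrors the
    // message text, not the balancer's exact internal computation.
    double[][] costs = { {500.0, 0.0}, {7.0, 0.0}, {15.0, 0.0}, {35.0, 0.0}, {5.0, 0.0} };
    double weighted = 0.0, totalWeight = 0.0;
    for (double[] c : costs) {
      weighted += c[0] * c[1];
      totalWeight += c[0];
    }
    System.out.println("weighted average imbalance = " + (weighted / totalWeight));
  }
}

In practice these keys would normally be set in hbase-site.xml on the HMaster rather than programmatically; the code form above is only to make the knobs referenced by the log concrete.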
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,387 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-13T18:38:01,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,389 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,394 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,400 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
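The skip decision reported in the INFO message above can be read as a simple threshold check against hbase.master.balancer.stochastic.minCostNeedBalance. The following is a minimal, illustrative Java sketch only, not the actual StochasticLoadBalancer code: it assumes the "weighted average imbalance" is the multiplier-weighted mean of the per-cost-function imbalances listed in the functionCost breakdown that follows, and the class name, hard-coded multipliers, and imbalance values are taken from this log run purely for illustration.

// Illustrative sketch of the decision rule described in the log line above.
// Assumption (not from HBase source): weighted average imbalance =
//   sum(multiplier_i * imbalance_i) / sum(multiplier_i), compared to
//   hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run).
import java.util.LinkedHashMap;
import java.util.Map;

public class BalanceDecisionSketch {
    static final double MIN_COST_NEED_BALANCE = 1.0; // threshold(1.0) from the log

    public static void main(String[] args) {
        // Multipliers and imbalances copied from the functionCost line below;
        // "(not needed)" functions are omitted.
        Map<String, double[]> functions = new LinkedHashMap<>();
        functions.put("RegionCountSkewCostFunction", new double[]{500.0, 0.0});
        functions.put("MoveCostFunction",            new double[]{7.0,   0.0});
        functions.put("RackLocalityCostFunction",    new double[]{15.0,  0.0});
        functions.put("TableSkewCostFunction",       new double[]{35.0,  0.0});
        functions.put("ReadRequestCostFunction",     new double[]{5.0,   0.0});
        functions.put("WriteRequestCostFunction",    new double[]{5.0,   0.0});
        functions.put("MemStoreSizeCostFunction",    new double[]{5.0,   0.0});
        functions.put("StoreFileCostFunction",       new double[]{5.0,   0.0});

        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (double[] mi : functions.values()) {
            weightedSum += mi[0] * mi[1]; // multiplier * imbalance
            multiplierSum += mi[0];
        }
        double weightedAverageImbalance =
            multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;

        // Mirrors the log: 0.0 <= 1.0, so balancing is skipped for this table.
        boolean skipBalancing = weightedAverageImbalance <= MIN_COST_NEED_BALANCE;
        System.out.printf("weighted average imbalance=%.1f, skip balancing=%b%n",
            weightedAverageImbalance, skipBalancing);
    }
}

Under this reading, lowering minCostNeedBalance or raising a cost function's multiplier (as the log message suggests) makes the comparison more likely to trigger an actual balance plan.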
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,405 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-13T18:38:01,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,408 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,411 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,418 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,425 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,427 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,427 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,428 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
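[Editor's note, not part of the captured log] The INFO message above reports that balancing for table40 was skipped because the weighted average imbalance (0.0) did not exceed hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run), and it suggests lowering that threshold or raising individual cost-function multipliers for more aggressive balancing. As a minimal configuration sketch only, assuming a standard hbase-site.xml and using an illustrative value rather than anything taken from this test, the threshold could be lowered like so:

<property>
  <name>hbase.master.balancer.stochastic.minCostNeedBalance</name>
  <!-- Illustrative value only; this test run uses 1.0 as reported in the log above.
       Lower values let the StochasticLoadBalancer generate plans for smaller imbalances. -->
  <value>0.05</value>
</property>

The functionCost summary that follows lists each cost function with its multiplier and current imbalance (e.g. RegionCountSkewCostFunction at multiplier=500.0); raising a function's multiplier through its corresponding balancer cost setting is the other lever the message refers to.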
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,428 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-13T18:38:01,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,438 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,450 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,452 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,454 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,459 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,459 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,459 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
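
Note: the message above names two tunables. A minimal sketch, assuming only the property names reported in the log and HBase's public Configuration API (the concrete values 0.05f and 1000f are illustrative assumptions, not recommendations), of how the threshold or a specific cost multiplier could be adjusted; the functionCost breakdown that follows shows the multipliers currently in effect:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Lower the minimum weighted-average imbalance required before the
        // StochasticLoadBalancer acts (the log reports the default threshold of 1.0).
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Or raise the relative weight of one cost function, e.g. region count skew
        // (multiplier=500.0 in the functionCost breakdown below); 1000f is illustrative.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
      }
    }

In a deployment these properties would normally live in hbase-site.xml rather than be set programmatically; the sketch only illustrates which knobs the log message is referring to.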
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,459 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-13T18:38:01,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,461 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,465 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,467 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,469 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,474 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,474 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,474 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-13T18:38:01,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,477 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,481 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,485 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,487 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,492 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,492 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,492 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-13T18:38:01,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,494 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,498 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,501 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,503 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,508 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,508 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
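Note: the balancer message above names two tuning knobs for more aggressive balancing. As an illustrative sketch only (not part of this test run), those knobs can be set through the master configuration; the property names used below (hbase.master.balancer.stochastic.minCostNeedBalance, hbase.master.balancer.stochastic.regionCountCost) and the example values are assumptions inferred from the message and common HBase defaults, so verify them against the HBase version in use. The functionCost breakdown for this decision continues below.

// Hypothetical tuning sketch, not taken from this run.
// Property keys and values are assumptions; check them against your HBase release.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Lower the "needs balance" threshold so smaller weighted imbalances still trigger a plan
    // (the run above skipped balancing because imbalance=0.0 <= threshold(1.0)).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    // Or raise a specific cost multiplier, e.g. region-count skew
    // (this run used multiplier=500.0 for RegionCountSkewCostFunction).
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
    return conf;
  }
}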
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,508 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-13T18:38:01,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,510 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,514 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,516 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,517 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,519 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,523 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,524 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,524 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
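The StochasticLoadBalancer message above names the two knobs it consults before emitting a plan: the global hbase.master.balancer.stochastic.minCostNeedBalance threshold (1.0 in this run) and the per-cost-function multipliers listed in functionCost below. As a hedged sketch only — the multiplier property key is an assumption inferred from the RegionCountSkewCostFunction name and its multiplier=500.0 shown in the log, not something the log itself confirms — a test or tooling snippet could lower the threshold and raise the region-count weight on a Configuration before handing it to the balancer:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    // Sketch only: in a real cluster these settings would normally live in hbase-site.xml.
    Configuration conf = HBaseConfiguration.create();

    // Property named verbatim in the log message; 1.0 is the threshold this test ran with.
    // Lowering it makes the balancer act on smaller weighted-average imbalances.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.025f);

    // Assumed key for RegionCountSkewCostFunction's multiplier (500.0 in the log);
    // raising it biases plans toward evening out per-server region counts.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

    System.out.println("minCostNeedBalance=" +
        conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
  }
}
```

With either change, the "skipping load balancing because weighted average imbalance=0.0 <= threshold" check above would trip less often, at the cost of more region moves; this is illustrative tuning, not the configuration used by this test run.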
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,524 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-13T18:38:01,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,526 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,531 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,535 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,547 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-13T18:38:01,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-13T18:38:01,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-13T18:38:01,548 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-13T18:38:01,548 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,548 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-13T18:38:01,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,550 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,561 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,564 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,566 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,579 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,579 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,579 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-13T18:38:01,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,581 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
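Reader's note on the table36 decision logged a little earlier: the balancer skips a table when the weighted average of the per-cost-function imbalances does not exceed hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run). The sketch below is a hedged reconstruction of that arithmetic over the functions listed in functionCost, assuming weighted average = sum(multiplier_i * imbalance_i) / sum(multiplier_i) with the "(not needed)" functions skipped; the exact bookkeeping inside StochasticLoadBalancer may differ in detail.

```java
// Hedged sketch of the "weighted average imbalance=0.0 <= threshold(1.0)" decision
// logged for table36 above. Multipliers and imbalances are copied from functionCost.
public class NeedsBalanceSketch {
  record CostTerm(String name, double multiplier, double imbalance) {}

  public static void main(String[] args) {
    CostTerm[] terms = {
      new CostTerm("RegionCountSkewCostFunction", 500.0, 0.0),
      new CostTerm("MoveCostFunction", 7.0, 0.0),
      new CostTerm("RackLocalityCostFunction", 15.0, 0.0),
      new CostTerm("TableSkewCostFunction", 35.0, 0.0),
      new CostTerm("ReadRequestCostFunction", 5.0, 0.0),
      new CostTerm("WriteRequestCostFunction", 5.0, 0.0),
      new CostTerm("MemStoreSizeCostFunction", 5.0, 0.0),
      new CostTerm("StoreFileCostFunction", 5.0, 0.0),
    };
    double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance

    double weightedSum = 0, multiplierSum = 0;
    for (CostTerm t : terms) {        // "(not needed)" functions are excluded entirely
      weightedSum += t.multiplier() * t.imbalance();
      multiplierSum += t.multiplier();
    }
    double weightedAverageImbalance = weightedSum / multiplierSum; // 0.0 for table36

    // Matches the log: 0.0 <= threshold(1.0), so no balance plan is generated for table36.
    System.out.println("skip balancing: " + (weightedAverageImbalance <= minCostNeedBalance));
  }
}
```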
2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,584 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,588 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,592 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,597 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,597 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,597 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
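The INFO message above names the two tuning levers for more aggressive balancing: lowering hbase.master.balancer.stochastic.minCostNeedBalance (reported as 1.0 in this run) or raising the relative multiplier of a specific cost function. A minimal sketch of setting those knobs programmatically follows; it assumes a standard HBase client classpath, and the regionCountCost property name is an assumption for the RegionCountSkewCostFunction multiplier that should be verified against the HBase release in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static void main(String[] args) {
            // Start from the default HBase configuration (hbase-site.xml on the classpath).
            Configuration conf = HBaseConfiguration.create();

            // Lower the "need balance" threshold quoted in the log (1.0 in this test run),
            // so the StochasticLoadBalancer acts on smaller weighted imbalances.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

            // Alternatively, raise a cost-function multiplier. The key below is assumed to be
            // the property behind RegionCountSkewCostFunction (multiplier=500.0 in the log);
            // confirm the exact property name for your HBase version before relying on it.
            conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

            System.out.println("minCostNeedBalance = "
                + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
        }
    }

In a real deployment these properties would typically be set in hbase-site.xml on the master (taking effect after a restart or an online configuration reload) rather than in client code; the snippet only illustrates which keys the log message is referring to.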
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,597 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-13T18:38:01,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,600 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,604 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,608 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,613 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,613 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,613 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-13T18:38:01,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,615 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,618 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,622 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,623 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,628 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
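Editor's note (hedged sketch, not part of the captured log): the StochasticLoadBalancer entry above skips balancing for table39 because the weighted average imbalance (0.0) does not exceed the minCostNeedBalance threshold (1.0 in this run), and it points at two tuning knobs: lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising a cost function's relative multiplier. The snippet below is a minimal illustration of setting those properties through the standard HBase/Hadoop Configuration API before a balancer run; the minCostNeedBalance key is quoted verbatim from the log, while the regionCountCost key (the usual multiplier for RegionCountSkewCostFunction, shown with multiplier=500.0 below) is an assumption on my part.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Lower the "needs balance" threshold named in the log (1.0 in this run),
    // so smaller weighted-average imbalances still trigger a balancing pass.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Alternatively, raise the relative weight of a specific cost function,
    // e.g. the region count skew multiplier (assumed property key; the log
    // reports its current multiplier as 500.0).
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

    System.out.println("minCostNeedBalance = "
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
  }
}

In a unit test such as this one, the same Configuration would be handed to the balancer before invoking it, so the table-specific plan generation that follows (table5 below) would no longer be short-circuited by the threshold check.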
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,629 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-13T18:38:01,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, srv1260219300=65, 
srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, srv627117823=317, 
srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,631 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-13T18:38:01,631 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-13T18:38:01,633 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-13T18:38:01,633 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
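The StochasticLoadBalancer entry earlier in this log (the table39 "skipping load balancing" message) names two tuning knobs: lowering hbase.master.balancer.stochastic.minCostNeedBalance, or raising the multiplier of a specific cost function. The snippet below is a minimal, self-contained Java sketch of the first option, not part of the test source; the class name and the 0.05 value are illustrative assumptions, while the property key is copied verbatim from the log message.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerThresholdSketch {
        public static void main(String[] args) {
            // Start from the default HBase configuration (hbase-site.xml on the classpath).
            Configuration conf = HBaseConfiguration.create();

            // Property key taken verbatim from the log message above; 0.05 is only an
            // illustrative value. Lower values let the balancer act on smaller
            // weighted-average imbalances than the threshold(1.0) seen in this run.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

            System.out.println("minCostNeedBalance = "
                + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
        }
    }

Note that with every cost function reporting imbalance=0.0, the weighted average (sum of multiplier*imbalance divided by the sum of multipliers, assuming the usual weighted-mean form) is 0.0 regardless of how the multipliers are set, which is why this particular run skips balancing whatever the threshold.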
2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-13T18:38:01,638 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-13T18:38:01,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-13T18:38:01,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,647 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,647 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,647 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-13T18:38:01,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, srv1260219300=65, 
srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, srv627117823=317, 
srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,649 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-13T18:38:01,650 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-13T18:38:01,650 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-13T18:38:01,651 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-13T18:38:01,653 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,658 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,661 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,662 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
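The balancer message above names the two knobs for more aggressive balancing: hbase.master.balancer.stochastic.minCostNeedBalance and the per-cost-function multipliers (the per-function cost breakdown from that same log record continues below). A minimal tuning sketch, assuming the standard HBase Configuration API and the commonly documented property names (exact keys and defaults can vary by HBase version):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Lower the "need balance" threshold so smaller imbalances still trigger a plan;
    // the run above uses 1.0, which suppresses balancing at imbalance 0.0.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    // Alternatively, raise the relative weight of a specific cost function, e.g. the
    // region-count skew multiplier (logged as multiplier=500.0 above); key name assumed.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
    System.out.println("minCostNeedBalance="
        + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}

In a live deployment these properties would normally be set in hbase-site.xml on the master rather than programmatically; the snippet only illustrates which settings the log message is referring to.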
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,662 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-13T18:38:01,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, srv1260219300=65, 
srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, srv627117823=317, 
srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,664 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-13T18:38:01,665 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-13T18:38:01,665 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-13T18:38:01,666 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
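These BalancerClusterState DEBUG lines enumerate the index bookkeeping the balancer builds before costing moves: each server name from the hosts map is assigned a host index and a rack index, and because this test runs one server per host with no rack topology, every server i lands on host i and rack 0. A rough, illustrative sketch of that mapping, using simplified stand-in code rather than the actual HBase internals:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ClusterStateIndexSketch {
  public static void main(String[] args) {
    // Hypothetical server names standing in for the srv* entries in the hosts map above.
    List<String> servers = List.of("srv1529032006", "srv1982023977", "srv1746991092");
    Map<String, Integer> hostToIndex = new HashMap<>();
    int[] serverToHost = new int[servers.size()];
    int[] serverToRack = new int[servers.size()];
    for (int i = 0; i < servers.size(); i++) {
      String host = servers.get(i);            // one server per host in this test setup
      Integer hostIdx = hostToIndex.get(host);
      if (hostIdx == null) {                   // assign host indexes in first-seen order
        hostIdx = hostToIndex.size();
        hostToIndex.put(host, hostIdx);
      }
      serverToHost[i] = hostIdx;
      serverToRack[i] = 0;                     // no rack script configured -> single rack 0
      System.out.println("server " + i + " is on host " + serverToHost[i]
          + ", rack " + serverToRack[i]);
    }
    System.out.println("number of hosts=" + hostToIndex.size() + ", number of racks=1");
  }
}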
2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-13T18:38:01,668 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-13T18:38:01,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,676 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-13T18:38:01,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, srv1260219300=65, 
srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, srv627117823=317, 
srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,679 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-13T18:38:01,680 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-13T18:38:01,680 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-13T18:38:01,681 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-13T18:38:01,683 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,691 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
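The StochasticLoadBalancer message above names the two tuning knobs it checks before producing a plan: the hbase.master.balancer.stochastic.minCostNeedBalance threshold and the per-cost-function multipliers. A minimal sketch of how those settings could be adjusted through the standard HBase Configuration API follows; the numeric values are illustrative assumptions, not values taken from this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Lower the minimum weighted-average imbalance the balancer requires
    // before it will generate a plan (this run used a threshold of 1.0).
    // 0.05 is an illustrative value, not one taken from the log.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Alternatively, raise the multiplier of a specific cost function so its
    // imbalance carries more weight; the log shows RegionCountSkewCostFunction
    // running at its multiplier of 500.0.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

    // The fallback value below is only used if the key is unset in this conf.
    System.out.println("minCostNeedBalance = "
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
  }
}

In a live cluster these properties would normally be set in hbase-site.xml on the master rather than in code; the snippet only shows the keys the log message refers to.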
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-13T18:38:01,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, srv1260219300=65, 
srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, srv627117823=317, 
srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,693 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-13T18:38:01,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-13T18:38:01,694 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-13T18:38:01,694 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-13T18:38:01,695 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-13T18:38:01,698 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,704 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,704 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
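
Note (editorial, not part of the captured log): the INFO message above is the StochasticLoadBalancer explaining why it skipped balancing table1 — the weighted average imbalance (0.0) did not exceed the configured threshold (1.0) — and its per-cost-function breakdown continues immediately below. As a hedged illustration only, the short Java sketch that follows shows one way to apply the message's own suggestion (lower hbase.master.balancer.stochastic.minCostNeedBalance, or raise a specific cost multiplier). It assumes hbase-common is on the classpath; in practice these properties would normally be set in hbase-site.xml, and the numeric values here are illustrative assumptions, not recommendations.

// Minimal sketch of tuning the two knobs named in the log message above.
// Assumption: hbase-common on the classpath; values are illustrative only.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Lower the threshold below which the balancer declines to generate a plan.
    // The test above runs with 1.0, so an imbalance of 0.0 is always "balanced enough".
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Alternatively, raise the relative multiplier of a specific cost function,
    // e.g. region count skew (reported above with multiplier=500.0).
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

    System.out.println("minCostNeedBalance = "
        + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}

Either change only affects how aggressively the balancer proposes moves; it does not alter the cluster-state bookkeeping (the servers-to-hosts/racks mappings) logged before and after this point. The log now resumes with the cost-function breakdown for table1 and the plan generation for table0.
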
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,704 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-13T18:38:01,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, srv1260219300=65, 
srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, srv627117823=317, 
srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,705 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-13T18:38:01,706 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-13T18:38:01,706 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-13T18:38:01,707 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-13T18:38:01,709 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,717 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-13T18:38:01,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-13T18:38:01,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-13T18:38:01,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-13T18:38:01,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-13T18:38:01,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,720 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,723 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,725 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,727 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,732 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,732 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
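[Editor's note, not part of the captured log] The log entry above says the balancer skipped table30 because the weighted average imbalance (0.0) was not above the configured threshold (1.0), and suggests either lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising individual cost-function multipliers. The following is a minimal sketch of how those knobs could be set through the standard HBase Configuration API; only the minCostNeedBalance key is named by the log itself, and the cost-function multiplier key used here is an assumption based on the usual StochasticLoadBalancer configuration keys, not something confirmed by this output.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // The balancer does nothing while weighted average imbalance <= this threshold.
    // The test run above uses 1.0, so balancing is skipped; lowering the value
    // makes the balancer act on smaller imbalances.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Alternatively, raise the relative weight of a specific cost function.
    // Key assumed here (the log only reports the multiplier value 500.0 for
    // RegionCountSkewCostFunction, not the property name).
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

    System.out.println("minCostNeedBalance = "
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f));
  }
}

In a real deployment these values would normally go into hbase-site.xml on the master rather than being set programmatically; the sketch only illustrates which settings the log message is referring to.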
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,732 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-13T18:38:01,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, srv1260219300=65, 
srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, srv627117823=317, 
srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,733 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-13T18:38:01,734 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-13T18:38:01,735 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-13T18:38:01,735 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-13T18:38:01,738 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-13T18:38:01,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,746 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,746 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,746 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-13T18:38:01,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,748 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
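The StochasticLoadBalancer message above (the table9 entry at 18:38:01,746) notes that balancing is skipped because the weighted average imbalance (0.0) is at or below hbase.master.balancer.stochastic.minCostNeedBalance (1.0), and that lowering this threshold or raising individual cost-function multipliers makes balancing more aggressive. A minimal sketch of such an override, assuming only the property name quoted in that log line; the 0.025f value, the class name, and the use of HBaseConfiguration are illustrative choices, not taken from this test run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerThresholdSketch {
        public static void main(String[] args) {
            // Hypothetical override: the default is 1.0 per the log above; any value
            // below the observed weighted average imbalance makes the balancer
            // generate a plan for the table instead of skipping it.
            Configuration conf = HBaseConfiguration.create();
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.025f);
            System.out.println(
                conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
        }
    }

The same Configuration object would be handed to the balancer under test; raising a specific multiplier (for example the RegionCountSkewCostFunction weight shown as 500.0 in the functionCost list) is the alternative the message mentions, but those multiplier property names are not shown in this log, so they are omitted here rather than guessed.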
2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,752 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,757 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
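The balancer message above names two tuning knobs: hbase.master.balancer.stochastic.minCostNeedBalance (the threshold the weighted average imbalance is compared against, 1.0 in this run) and the per-cost-function multipliers (for example, RegionCountSkewCostFunction reports multiplier=500.0 below). As a minimal sketch, and assuming these properties would normally live in hbase-site.xml on the master rather than be set in code, adjusting them programmatically might look like the following; the class name, the concrete values, and the mapping of the region-count multiplier to the hbase.master.balancer.stochastic.regionCountCost key are illustrative assumptions, not taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: shows where the two knobs mentioned in the log line above could be set.
public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Lower the "skip balancing" threshold so runs whose weighted average imbalance
    // is small are no longer skipped (this run compared against threshold 1.0).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Alternatively, weight one cost function more heavily; 500.0 is the multiplier
    // reported for RegionCountSkewCostFunction in the functionCost line below, so a
    // larger value makes region-count skew dominate the overall cost.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

    // Print the effective values back to confirm what the balancer would see.
    System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    System.out.println(conf.get("hbase.master.balancer.stochastic.regionCountCost"));
  }
}

On a live cluster these settings would typically be placed in hbase-site.xml and take effect once the master re-reads its configuration; in a unit test such as the one logged here, the Configuration object is built in-process.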
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-13T18:38:01,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, srv1260219300=65, 
srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, srv627117823=317, 
srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,764 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-13T18:38:01,765 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-13T18:38:01,766 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-13T18:38:01,766 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-13T18:38:01,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-13T18:38:01,768 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-13T18:38:01,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,776 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,779 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,781 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,783 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,787 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-13T18:38:01,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-13T18:38:01,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-13T18:38:01,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-13T18:38:01,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-13T18:38:01,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-13T18:38:01,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-13T18:38:01,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-13T18:38:01,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-13T18:38:01,788 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-13T18:38:01,788 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-13T18:38:01,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, srv1260219300=65, 
srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, srv627117823=317, 
srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,790 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-13T18:38:01,791 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-13T18:38:01,792 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-13T18:38:01,792 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-13T18:38:01,794 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-13T18:38:01,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,800 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,802 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-13T18:38:01,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-13T18:38:01,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-13T18:38:01,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-13T18:38:01,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-13T18:38:01,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-13T18:38:01,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-13T18:38:01,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-13T18:38:01,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-13T18:38:01,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-13T18:38:01,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-13T18:38:01,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-13T18:38:01,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-13T18:38:01,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-13T18:38:01,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-13T18:38:01,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
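[Editor's note, not part of the captured log] The StochasticLoadBalancer message above names the two tuning knobs it consults: the global threshold hbase.master.balancer.stochastic.minCostNeedBalance (default 1.0) and the per-cost-function multipliers listed in functionCost. A minimal sketch of setting those knobs programmatically follows. The minCostNeedBalance key is taken verbatim from the log message; the regionCountCost key and both values are assumptions for illustration only, not settings used by this test run.

// Sketch only: shows how the knobs named in the balancer log could be applied
// to an HBase Configuration; verify property names against the HBase docs.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static Configuration tunedConf() {
    // Start from the default HBase configuration (hbase-site.xml on the classpath).
    Configuration conf = HBaseConfiguration.create();
    // Named explicitly in the log: lowering it below the default 1.0 makes the
    // balancer emit plans at smaller weighted-average imbalances.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    // Assumed property name for the RegionCountSkewCostFunction multiplier
    // (shown as multiplier=500.0 in the functionCost line above); raising it
    // weights region-count skew more heavily relative to the other costs.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
    return conf;
  }
}

The same keys could equally be set in hbase-site.xml on the master; the values above are illustrative, not recommendations. The log continues below.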
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,806 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,809 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,810 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,812 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
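[Editor's note] The INFO message above describes the balancer's skip decision: the weighted average imbalance is compared against hbase.master.balancer.stochastic.minCostNeedBalance (1.0 here). As a minimal sketch only, assuming the "weighted average imbalance" is the multiplier-weighted mean of the per-cost-function imbalances reported in the functionCost breakdown that follows (this is an illustration, not the HBase source), the decision can be reproduced like this:

// Hedged sketch: reproduce the "weighted average imbalance <= threshold" check
// from the multipliers/imbalances in the functionCost line below. Functions
// marked "(not needed)" are excluded. Not taken from HBase code.
public class WeightedImbalanceSketch {
  public static void main(String[] args) {
    // {multiplier, imbalance} pairs from the logged functionCost breakdown
    double[][] costs = {
      {500.0, 0.0}, // RegionCountSkewCostFunction
      {7.0,   0.0}, // MoveCostFunction
      {15.0,  0.0}, // RackLocalityCostFunction
      {35.0,  0.0}, // TableSkewCostFunction
      {5.0,   0.0}, // ReadRequestCostFunction
      {5.0,   0.0}, // WriteRequestCostFunction
      {5.0,   0.0}, // MemStoreSizeCostFunction
      {5.0,   0.0}  // StoreFileCostFunction
    };
    double weightedSum = 0.0;
    double multiplierSum = 0.0;
    for (double[] c : costs) {
      weightedSum += c[0] * c[1];
      multiplierSum += c[0];
    }
    double weightedAverageImbalance = weightedSum / multiplierSum; // 0.0 in this run
    double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance
    boolean skipBalancing = weightedAverageImbalance <= minCostNeedBalance;
    System.out.printf("weighted average imbalance=%.1f, skip balancing=%b%n",
        weightedAverageImbalance, skipBalancing);
  }
}

With every imbalance at 0.0, the weighted average is 0.0, which is at or below the 1.0 threshold, so the balancer logs the "skipping load balancing" message instead of generating a plan for table33.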
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-13T18:38:01,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, srv1260219300=65, 
srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, srv627117823=317, 
srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,817 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-13T18:38:01,817 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-13T18:38:01,817 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-13T18:38:01,818 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-13T18:38:01,820 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-13T18:38:01,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,827 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
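(Editor's note, not part of the log: the StochasticLoadBalancer message above points at two tuning knobs, the hbase.master.balancer.stochastic.minCostNeedBalance threshold and the per-cost-function multipliers that are itemized in the functionCost line that follows. The sketch below is only an illustration of how those settings could be applied through the standard Hadoop/HBase Configuration API; the key names are the usual StochasticLoadBalancer properties referenced by the message, while the class name and the concrete values chosen here are assumptions for demonstration, not taken from this test run.)

// Hypothetical sketch: tuning the stochastic balancer settings named in the log.
// Assumes hbase-common on the classpath; values are illustrative only.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Lower the "needs balancing" threshold so smaller imbalances trigger a plan
    // (the log shows balancing skipped because imbalance 0.0 <= threshold 1.0).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Relative multipliers for individual cost functions; the log prints the
    // values in effect for this run (500, 7, 35, 15, ...).
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 500f);
    conf.setFloat("hbase.master.balancer.stochastic.moveCost", 7f);
    conf.setFloat("hbase.master.balancer.stochastic.tableSkewCost", 35f);

    System.out.println("minCostNeedBalance = "
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.025f));
  }
}

(The same keys could instead be set in hbase-site.xml; raising a multiplier weights that cost function more heavily relative to the others, which is the alternative the log message suggests.)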
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-13T18:38:01,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,828 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,832 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,835 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-13T18:38:01,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,842 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,845 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,850 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,855 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
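Note (editor's sketch, not part of the captured log): the StochasticLoadBalancer record above skips balancing for table24 because the weighted average imbalance (0.0) does not exceed hbase.master.balancer.stochastic.minCostNeedBalance (1.0), and the message itself suggests the two tuning knobs: lower that threshold or raise the multiplier of a specific cost function. The minimal Java sketch below illustrates, under stated assumptions, how such tuning could be applied via the Hadoop Configuration API and how a weighted-average comparison of this kind can be reproduced; the minCostNeedBalance key is taken verbatim from the log, while the regionCountCost multiplier key and the exact averaging formula are assumptions for illustration only, not the balancer's confirmed internals.

// Illustrative sketch only. Assumes HBase/Hadoop on the classpath.
// Property "hbase.master.balancer.stochastic.minCostNeedBalance" is quoted from the log;
// the multiplier key and the weighted-average formula are labeled assumptions.
import org.apache.hadoop.conf.Configuration;

public class BalancerTuningSketch {

  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Lower the "need balance" threshold so smaller imbalances trigger a plan
    // (the log shows the default 1.0 causing the table to be skipped at imbalance=0.0).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Assumed multiplier key for RegionCountSkewCostFunction (multiplier=500.0 in the log);
    // raising it would make region-count skew dominate the weighted cost.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

    // Hypothetical reconstruction of the logged decision: a weighted average of the
    // per-cost-function imbalances compared against the threshold.
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0}; // from the functionCost line
    double[] imbalances  = {0.0,   0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0}; // from the functionCost line
    double weightedImbalance = weightedAverage(multipliers, imbalances);
    double threshold =
        conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f);
    System.out.printf("weighted imbalance=%.3f, threshold=%.3f, run balancer=%s%n",
        weightedImbalance, threshold, weightedImbalance > threshold);
  }

  // Assumed form of the weighted average: sum(m_i * x_i) / sum(m_i).
  private static double weightedAverage(double[] m, double[] x) {
    double num = 0, den = 0;
    for (int i = 0; i < m.length; i++) {
      num += m[i] * x[i];
      den += m[i];
    }
    return den == 0 ? 0 : num / den;
  }
}

With every imbalance at 0.0, any non-negative threshold leads to the same outcome seen in the log: the plan for the table is skipped and the balancer moves on to the next table (table25 below).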
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-13T18:38:01,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,857 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,860 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,863 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-13T18:38:01,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
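(Illustrative sketch, not part of the test output: the INFO message above says that to get more aggressive balancing one can either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or raise the relative multiplier of a specific cost function. A minimal Java example of setting those properties through the standard Hadoop/HBase Configuration API could look like the following; the chosen values are arbitrary examples, and the regionCountCost property name is an assumption mirroring the RegionCountSkewCostFunction multiplier of 500.0 shown in the log.)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    // Hypothetical tuning sketch based on the log message above; not taken from this test run.
    Configuration conf = HBaseConfiguration.create();

    // Lower the "need balance" threshold so the stochastic balancer runs even when the
    // weighted average imbalance is small (the log shows the default threshold of 1.0).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Alternatively, increase the relative weight of one cost function, e.g. the region
    // count skew cost (multiplier=500.0 in the log). Property name assumed here.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

    System.out.println("minCostNeedBalance = "
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
  }
}

(Such settings would normally live in hbase-site.xml on the master rather than be set programmatically; the snippet only shows which keys the log message is referring to.)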
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,868 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,870 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,874 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
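
[Editor's note, not part of the captured log] The StochasticLoadBalancer record above explains why balancing was skipped for table26: the weighted average imbalance (0.0) did not exceed the threshold hbase.master.balancer.stochastic.minCostNeedBalance (1.0). The following is a minimal, hypothetical Java sketch, purely for illustration: it shows how that property could be lowered through the standard HBaseConfiguration/Configuration API, and how a weighted average of the kind reported here would plausibly be derived from the multipliers and imbalances listed in the functionCost detail that follows. The class name, the hard-coded value 0.05f, and the exact averaging formula are assumptions, not taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerThresholdSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // The log reports the default threshold of 1.0; a smaller value (assumed
    // 0.05 here) would make the balancer act on smaller weighted imbalances.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Assumed reading of "weighted average imbalance": the multiplier-weighted
    // mean of the per-cost-function imbalances listed in the functionCost
    // record (RegionCountSkew=500.0, Move=7.0, RackLocality=15.0,
    // TableSkew=35.0, ReadRequest/WriteRequest/MemStoreSize/StoreFile=5.0).
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances  = {0.0,   0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0};
    double weightedSum = 0.0;
    double weightTotal = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weightedSum += multipliers[i] * imbalances[i];
      weightTotal += multipliers[i];
    }
    // All imbalances are 0.0 in this run, so the result is 0.0, matching the log.
    double weightedAverageImbalance = weightedSum / weightTotal;
    System.out.println("weighted average imbalance = " + weightedAverageImbalance);
  }
}

With every per-function imbalance at 0.0, any positive threshold causes the balancer to skip the table, which is exactly the behavior the record above reports.
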
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-13T18:38:01,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,880 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,882 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,883 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,885 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
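The INFO message just above is the balancer declining to act for table27: the weighted average imbalance (0.0) is not above the configured minCostNeedBalance threshold (1.0), so no plan is generated. As a minimal sketch of the first remedy the message suggests, the property hbase.master.balancer.stochastic.minCostNeedBalance (named verbatim in the log) could be lowered through the standard HBase Configuration API; the value 0.05 below is purely illustrative and not taken from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch only: lowering the threshold below the observed imbalance keeps
    // the StochasticLoadBalancer willing to generate a plan. Assumes the usual
    // hbase-site.xml / Configuration mechanism; 0.05 is an illustrative value.
    public class MinCostNeedBalanceSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // This run has the threshold at 1.0, so an imbalance of 0.0 is a no-op.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        System.out.println("minCostNeedBalance = "
            + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
      }
    }

The alternative the message names is raising the relative multiplier of whichever cost function should dominate; the functionCost breakdown that follows lists the multipliers in effect for this run.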
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-13T18:38:01,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,889 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,891 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,895 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-13T18:38:01,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,899 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,902 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,904 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,906 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,909 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-13T18:38:01,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-13T18:38:01,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-13T18:38:01,910 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-13T18:38:01,910 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
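For context on the skip decision above: the balancer compares a weighted average of the per-cost-function imbalances (each weighted by its multiplier, as listed in the functionCost line that follows) against hbase.master.balancer.stochastic.minCostNeedBalance. The sketch below is a simplified, hypothetical illustration of that comparison using the multipliers and imbalances reported in this log; it is not the actual StochasticLoadBalancer implementation, and the class and method names are invented for the example.

// Hypothetical sketch only: weighted-average imbalance over cost functions,
// mirroring the multiplier/imbalance pairs reported in the functionCost line below.
// Not the actual org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer code.
import java.util.LinkedHashMap;
import java.util.Map;

public class WeightedImbalanceSketch {
  /** Weighted average of per-cost-function imbalances, using the multipliers as weights. */
  static double weightedAverageImbalance(Map<String, double[]> costs) {
    double weightedSum = 0.0, totalWeight = 0.0;
    for (double[] mi : costs.values()) { // mi[0] = multiplier, mi[1] = imbalance
      weightedSum += mi[0] * mi[1];
      totalWeight += mi[0];
    }
    return totalWeight == 0.0 ? 0.0 : weightedSum / totalWeight;
  }

  public static void main(String[] args) {
    // Values taken from the functionCost line reported in this log run.
    Map<String, double[]> costs = new LinkedHashMap<>();
    costs.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
    costs.put("MoveCostFunction",            new double[] {7.0, 0.0});
    costs.put("RackLocalityCostFunction",    new double[] {15.0, 0.0});
    costs.put("TableSkewCostFunction",       new double[] {35.0, 0.0});
    costs.put("ReadRequestCostFunction",     new double[] {5.0, 0.0});
    costs.put("WriteRequestCostFunction",    new double[] {5.0, 0.0});
    costs.put("MemStoreSizeCostFunction",    new double[] {5.0, 0.0});
    costs.put("StoreFileCostFunction",       new double[] {5.0, 0.0});

    double imbalance = weightedAverageImbalance(costs);
    double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance
    // With every imbalance at 0.0, the weighted average is 0.0 <= 1.0, so balancing is skipped.
    System.out.println("weighted average imbalance=" + imbalance
        + ", needsBalance=" + (imbalance > minCostNeedBalance));
  }
}

As the log message itself notes, lowering hbase.master.balancer.stochastic.minCostNeedBalance (or raising the multipliers of the cost functions you care about) is what would make this comparison trip and produce a balance plan instead of skipping.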
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,910 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-13T18:38:01,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,911 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,915 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,919 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,922 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
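The balancer message above names the hbase.master.balancer.stochastic.minCostNeedBalance property as the threshold that caused this balancing pass to be skipped (the per-cost-function breakdown of that same record continues in the next log line). As a minimal illustrative sketch only, not part of this test run, lowering that threshold on an HBase Configuration could look like the following Java snippet; the class name BalancerTuningSketch and the value 0.05f are assumptions chosen for illustration, while the property name and the 1.0 threshold are taken verbatim from the log record above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static void main(String[] args) {
            // Start from the standard HBase configuration (hbase-site.xml on the classpath, if any).
            Configuration conf = HBaseConfiguration.create();

            // The log reports the current threshold as 1.0; a lower value makes the
            // StochasticLoadBalancer act on smaller weighted-average imbalances.
            // 0.05f is an arbitrary illustrative choice, not a recommended setting.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

            // Print the value back to confirm what would be handed to the master.
            System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
        }
    }

In a real deployment this property would normally be set in the master's hbase-site.xml rather than programmatically; the snippet only shows which key the log message refers to.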
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,922 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-13T18:38:01,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,923 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,925 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,928 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,932 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,932 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,932 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-13T18:38:01,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,933 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:01,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:01,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:01,936 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:01,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:01,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:01,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:01,938 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:01,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:01,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:01,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,941 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
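The StochasticLoadBalancer message above names the two knobs that control when it bothers to balance: hbase.master.balancer.stochastic.minCostNeedBalance and the per-cost-function multipliers. A minimal sketch of adjusting them through the standard HBaseConfiguration API follows; the multiplier key used here (hbase.master.balancer.stochastic.regionCountCost) is assumed to be the one backing RegionCountSkewCostFunction (shown with multiplier=500.0 in the functionCost line below), not confirmed by this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      // Returns a Configuration that makes the stochastic balancer more aggressive,
      // per the hint in the log message above. Values are illustrative only.
      public static Configuration aggressiveBalancerConf() {
        Configuration conf = HBaseConfiguration.create();
        // Lower the "need balance" threshold from its default of 1.0 so the
        // balancer acts on smaller weighted-average imbalances.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Alternatively (or additionally) raise one cost function's relative weight;
        // this key is assumed to correspond to RegionCountSkewCostFunction.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        return conf;
      }
    }

The same keys can equally be set in hbase-site.xml; the programmatic form is shown only because these tests build their balancer from a Configuration object.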
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-13T18:38:01,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529032006=117, srv1982023977=209, srv1746991092=165, srv2126012011=236, srv950968102=381, srv585510235=309, srv1158946530=45, srv1587268275=133, srv1410441700=96, srv764423091=336, srv3772284=273, srv948952582=379, srv1104525298=25, srv845557794=351, srv283527870=253, srv188339301=196, srv314012026=262, srv2076594511=224, srv281664465=252, srv1662761452=146, srv452686685=287, srv1018493379=5, srv570423651=306, srv1134838040=34, srv1733420485=162, srv666144196=323, srv1142849234=39, srv471306147=290, srv893772115=366, srv1535252437=119, srv1093963132=22, srv1996161170=212, srv482461863=292, srv1669116256=147, srv1681098516=149, srv1519233889=114, srv767858709=338, srv330321158=265, srv1586530652=131, srv1941269988=205, srv308596121=259, srv769386844=340, srv936079938=377, srv1350607853=82, srv1553784091=123, srv487411170=294, srv300437854=256, srv1096035176=24, srv251156145=246, srv543867214=302, srv1066565822=14, srv533661288=300, srv134712761=81, srv1804843910=175, srv1885731102=198, srv755058217=334, srv1925437045=204, srv1432187461=98, srv1206667536=54, srv1440441504=99, srv731492586=331, srv1145809420=40, srv857126161=355, srv951259044=382, srv1872249005=194, srv1005908433=1, srv1537339005=120, srv589680764=312, srv1645002955=144, srv549047330=303, srv893545860=365, srv1247654818=60, srv135902550=85, srv217659904=240, srv1169216884=48, srv1146499181=41, srv1333658424=79, srv998780890=392, srv1488515773=109, srv666319964=324, srv933300175=376, srv1400666356=93, srv1601814062=134, srv911999879=371, srv1904172718=200, srv682695332=326, srv160651106=135, srv196210284=208, srv1869379873=190, srv1648097345=145, srv1720250677=161, srv1223952042=56, srv502269212=297, srv1137324142=36, srv1579090543=129, srv1573805462=128, srv1393360640=92, srv277128945=251, srv2018361945=215, srv2106316005=232, srv438861737=284, srv138965730=87, srv373836163=271, srv1558160471=124, srv214357227=238, srv1621619118=140, srv301870383=258, srv1247667023=61, srv970078569=385, srv1524635141=115, srv489259471=296, srv1059245312=11, srv1118917492=27, srv373949173=272, srv531834253=299, srv1799122302=173, srv590097379=313, srv949042807=380, srv426105728=281, srv326068938=264, srv968462571=384, srv1084556220=18, srv488782487=295, srv1120121920=29, srv1878562660=195, srv85824101=356, srv1382759647=86, srv437205275=283, srv851348442=353, srv1307918552=73, srv994613200=391, srv1307949058=74, srv6383959=319, srv393045060=276, srv1023440029=7, srv1294973474=70, srv1130921249=33, srv1627687724=142, srv1885222322=197, srv1539303077=121, srv1834231248=182, srv761663898=335, srv1817446366=178, srv1912598697=201, srv768775821=339, srv1846035538=184, srv1119134085=28, 
srv1260219300=65, srv2082370822=226, srv11284882=31, srv1411975016=97, srv943398017=378, srv37904222=274, srv649814877=321, srv95719450=383, srv580120710=308, srv435732970=282, srv1443830116=100, srv1761650934=169, srv577880174=307, srv1925274740=203, srv249351730=245, srv1252662848=64, srv443888584=285, srv1216449919=55, srv273005296=249, srv1004761600=0, srv1688207644=152, srv154488835=122, srv1826242078=180, srv890573350=362, srv1322423138=77, srv986061140=388, srv1583628613=130, srv206814691=222, srv588454812=311, srv1390413990=88, srv1753275983=166, srv1311739014=75, srv3464016=268, srv1853849994=185, srv861684218=358, srv1070257779=15, srv1162914548=46, srv83770712=349, srv892843319=364, srv1586953702=132, srv1616172246=137, srv112969166=32, srv1190222983=50, srv1853978816=186, srv1192591523=51, srv1087657598=20, srv1063850362=13, srv923077330=373, srv1153489185=43, srv1948994228=207, srv413330538=279, srv1017440404=4, srv2013254042=214, srv101159065=3, srv1021087685=6, srv47391673=291, srv1869931117=191, srv754814203=333, srv1988027097=210, srv1943190385=206, srv875624871=359, srv242299524=241, srv208358593=229, srv484937916=293, srv1154002387=44, srv929224657=374, srv1095577233=23, srv682161392=325, srv876975810=360, srv1813929791=177, srv1085469865=19, srv1681196425=150, srv123623596=59, srv2038882475=219, srv1073176877=16, srv1741904056=164, srv1871729025=193, srv1054115680=9, srv1493033445=110, srv2021054709=216, srv810799674=347, srv1532866614=118, srv127887826=67, srv1031546942=8, srv692482512=327, srv1644414555=143, srv353063432=269, srv542242751=301, srv1463397421=104, srv1923704986=202, srv1475276241=105, srv1074030434=17, srv1844855118=183, srv527762698=298, srv1495031420=111, srv333250128=267, srv838888789=350, srv97958001=387, srv892109539=363, srv14822731=107, srv1781709099=171, srv1451380677=101, srv1676383680=148, srv1297194576=71, srv1333329020=78, srv1613294692=136, srv276303157=250, srv1284505785=68, srv631071967=318, srv1737837978=163, srv11881085=49, srv5955803=315, srv920927449=372, srv557657271=304, srv64543624=320, srv2072191845=223, srv2135837763=237, srv650999216=322, srv1137046776=35, srv1110468721=26, srv246845806=244, srv902244773=370, srv11998111=52, srv1871635047=192, srv243965570=242, srv455745905=289, srv899568442=369, srv1346848648=80, srv854865550=354, srv408766433=277, srv1163488302=47, srv749253283=332, srv1286447856=69, srv1251600355=63, srv256624336=247, srv2008145319=213, srv319422385=263, srv1409306963=95, srv1391876489=89, srv931553888=375, srv152752783=116, srv244775156=243, srv794974128=346, srv1509926396=112, srv1802330259=174, srv2146743375=239, srv2096787476=230, srv1139148365=37, srv1618319509=139, srv2031104748=218, srv106038335=12, srv1890468102=199, srv712952135=329, srv1707406077=156, srv1563349163=126, srv20249303=217, srv1822288482=179, srv587664280=310, srv291096268=255, srv455499606=288, srv1867631956=188, srv389111138=275, srv894335899=367, srv1302630146=72, srv987710786=389, srv816982612=348, srv1515340352=113, srv2098322123=231, srv1480996181=106, srv1485686530=108, srv1686780035=151, srv291026963=254, srv1224503825=57, srv1782277747=172, srv208304444=228, srv2053631000=220, srv603612519=316, srv889587168=361, srv2120551772=234, srv186802339=189, srv765132536=337, srv1392814415=90, srv1715286890=158, srv2125840140=235, srv897903239=368, srv1697235438=155, srv423402134=280, srv1204091031=53, srv1056066986=10, srv1322116306=76, srv1865473144=187, srv793101491=345, srv1123137774=30, srv1461699460=102, srv1570620494=127, 
srv627117823=317, srv1827473224=181, srv1694315280=154, srv1232628738=58, srv171715972=160, srv1350836955=84, srv780016923=342, srv1715596180=159, srv1618270143=138, srv2079740390=225, srv2082422851=227, srv787835461=344, srv266401548=248, srv1146922695=42, srv1011378430=2, srv300857324=257, srv1690036152=153, srv972155308=386, srv1139680701=38, srv311616704=260, srv1560516076=125, srv332459913=266, srv410025903=278, srv1769111377=170, srv9935479=390, srv1276441627=66, srv717639222=330, srv365782476=270, srv1461995757=103, srv860295676=357, srv2060353552=221, srv849240421=352, srv56021488=305, srv591967045=314, srv313704591=261, srv1350768099=83, srv775126482=341, srv1994379314=211, srv1090254178=21, srv1758288547=167, srv446201717=286, srv1759228056=168, srv1392893503=91, srv180662785=176, srv1622259255=141, srv782776232=343, srv703600541=328, srv171020184=157, srv140899024=94, srv2110785309=233, srv1249408017=62} racks are {rack=0} 2024-11-13T18:38:01,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:01,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:01,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:01,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:01,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:01,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:01,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:01,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:01,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:01,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:01,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:01,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:01,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:01,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:01,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:01,943 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:01,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:01,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:01,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:01,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:01,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:01,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:01,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:01,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:01,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:01,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:01,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:01,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:01,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-13T18:38:01,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117
2024-11-13T18:38:01,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118
[... identical "server N is on host N" DEBUG entries from balancer.BalancerClusterState(303) continue for servers 119 through 391, one entry per server, timestamps 2024-11-13T18:38:01,944 through 18:38:01,946 ...]
2024-11-13T18:38:01,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392
2024-11-13T18:38:01,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0
2024-11-13T18:38:01,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0
[... identical "server N is on rack 0" INFO entries from balancer.BalancerClusterState(314) continue for servers 2 through 379, one entry per server, timestamps 2024-11-13T18:38:01,946 through 18:38:01,950 ...]
2024-11-13T18:38:01,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0
2024-11-13T18:38:01,950 INFO [Time-limited test {}]
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:01,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:01,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:01,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:01,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:01,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:01,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-13T18:38:01,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:01,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:01,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:01,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:01,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:01,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-13T18:38:01,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:01,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(575): slop=-1.0 2024-11-13T18:38:01,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 577.0 etc. 2024-11-13T18:38:01,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-13T18:38:01,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
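[Editor's note] The "Loaded config" entry above summarizes the knobs this test runs with: maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, minCostNeedBalance=1.0 and slop=-1.0. Below is a minimal sketch of how such a configuration could be expressed in code. The key names other than hbase.master.balancer.stochastic.minCostNeedBalance (which the skip message prints verbatim) are assumptions based on the same hbase.master.balancer.stochastic.* naming convention, and hbase.regions.slop is likewise an assumed key for the slop=-1.0 value; this is not a copy of the test's setup.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Values mirror the "Loaded config" line above; key names are assumptions, see note.
        conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1000000);        // maxSteps=1000000
        conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);   // runMaxSteps=false
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);      // stepsPerRegion=800
        conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30000L);  // maxRunningTime=30000 ms
        // Printed verbatim in the skip message: lowering this makes balancing more aggressive.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f); // threshold(1.0)
        // Assumed key behind "Slop is less than zero, not checking for sloppiness."
        conf.setFloat("hbase.regions.slop", -1.0f);
        System.out.println("balancer config prepared: " + conf);
      }
    }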
2024-11-13T18:38:01,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-13T18:38:01,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-13T18:38:01,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-13T18:38:01,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1736436621=1, srv1006196459=0} racks are {rack=0} 2024-11-13T18:38:01,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:01,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:01,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:01,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:01,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=2, number of hosts=2, number of racks=1 2024-11-13T18:38:01,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:01,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=3200 2024-11-13T18:38:01,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 39 ms to try 3200 different iterations. Found a solution that moves 1 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.006065857885615251. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.5); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,046 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-13T18:38:02,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1606523324=0, srv2117187000=1} racks are {rack=0} 2024-11-13T18:38:02,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:02,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:02,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:02,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:02,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=2, number of hosts=2, number of racks=1 2024-11-13T18:38:02,047 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:02,047 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,047 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
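[Editor's note] The two-server run that just finished (weighted average imbalance 0.8665511265164645 before, 0.006065857885615251 after moving one region) can be reproduced by hand: the reported value is the multiplier-weighted mean of the per-function imbalance values, normalized by the sum of the multipliers (577.0 per the "Loaded config" line, i.e. 500+7+15+35+5+5+5+5). A minimal sketch of that arithmetic, assuming exactly this weighting and not copying HBase's internal code:

    public class WeightedImbalanceSketch {
      // weighted average imbalance = sum(multiplier_i * imbalance_i) / sum(multiplier_i)
      static double weightedImbalance(double[][] multiplierAndImbalance) {
        double num = 0.0, den = 0.0;
        for (double[] c : multiplierAndImbalance) {
          den += c[0];
          num += c[0] * c[1];
        }
        return num / den;
      }

      public static void main(String[] args) {
        // {multiplier, imbalance} pairs copied from the functionCost output above.
        double[][] before = { {500, 1.0}, {7, 0.0}, {15, 0.0}, {35, 0.0}, {5, 0.0}, {5, 0.0}, {5, 0.0}, {5, 0.0} };
        double[][] after  = { {500, 0.0}, {7, 0.5}, {15, 0.0}, {35, 0.0}, {5, 0.0}, {5, 0.0}, {5, 0.0}, {5, 0.0} };
        System.out.println(weightedImbalance(before)); // 500*1.0/577 = 0.8665511265164645
        System.out.println(weightedImbalance(after));  // 7*0.5/577   = 0.006065857885615251
      }
    }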
2024-11-13T18:38:02,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv430490061=0, srv678411572=1} racks are {rack=0} 2024-11-13T18:38:02,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:02,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:02,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:02,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:02,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=2, number of hosts=2, number of racks=1 2024-11-13T18:38:02,048 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:02,048 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,048 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-13T18:38:02,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1246983894=0, srv1921950767=1} racks are {rack=0} 2024-11-13T18:38:02,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:02,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:02,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:02,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:02,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=3, number of hosts=2, number of racks=1 2024-11-13T18:38:02,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:02,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-13T18:38:02,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1863701779=1, srv1009959344=0} racks are {rack=0} 2024-11-13T18:38:02,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:02,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:02,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:02,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:02,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=4, number of hosts=2, number of racks=1 2024-11-13T18:38:02,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:02,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.2888503755054882 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.33333333333333337); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-13T18:38:02,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1354361574=0, srv216158494=1} racks are {rack=0} 2024-11-13T18:38:02,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:02,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:02,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:02,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:02,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:38:02,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:02,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-13T18:38:02,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1914567659=0, srv945415976=1} racks are {rack=0} 2024-11-13T18:38:02,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:02,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:02,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:02,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:02,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-13T18:38:02,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:02,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-13T18:38:02,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1508257276=0, srv194103347=1} racks are {rack=0} 2024-11-13T18:38:02,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:02,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:02,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:02,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:02,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=10, number of hosts=2, number of racks=1 2024-11-13T18:38:02,051 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:02,051 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.6932409012131716 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.8); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,063 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
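[Editor's note] Plugging the same weighting into the skipped runs above shows why they stay under the default threshold even with a visibly skewed RegionCountSkewCostFunction (assuming the multiplier-weighted average sketched earlier):

    500 x 0.8       / 577 = 0.6932409012131716  <= 1.0  ->  "Cluster wide - skipping" (the 10-table run)
    500 x 0.33333.. / 577 = 0.2888503755054882  <= 1.0  ->  "Cluster wide - skipping" (the 4-table run)

As the skip message itself suggests, only lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising the relevant multiplier would let these runs produce a move plan.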
2024-11-13T18:38:02,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv620494474=1, srv1868884113=0} racks are {rack=0} 2024-11-13T18:38:02,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:02,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:02,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:02,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:02,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1432, number of hosts=2, number of racks=1 2024-11-13T18:38:02,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:02,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.40878413881917497 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.4717368961973279); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-13T18:38:02,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1903316533=0, srv945932091=1} racks are {rack=0} 2024-11-13T18:38:02,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:02,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:02,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:02,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:02,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=53, number of hosts=2, number of racks=1 2024-11-13T18:38:02,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:02,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.034662045060658585 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.04000000000000001); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-13T18:38:02,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1872520365=2, srv1292829797=0, srv135534900=1} racks are {rack=0} 2024-11-13T18:38:02,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:02,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:02,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:02,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:02,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:02,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:02,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=2, number of hosts=3, number of racks=1 2024-11-13T18:38:02,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5773502691896258); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.5003035261608543, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5773502691896258); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=7200 2024-11-13T18:38:02,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. 
Computation took 64 ms to try 7200 different iterations. Found a solution that moves 1 regions; Going from a computed imbalance of 0.5003035261608543 to a new imbalance of 0.004043905257076833. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.3333333333333333); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-13T18:38:02,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv670836454=2, srv521019360=1, srv1844852657=0} racks are {rack=0} 2024-11-13T18:38:02,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:02,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:02,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:02,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:02,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:02,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:02,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=3, number of hosts=3, number of racks=1 2024-11-13T18:38:02,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:02,166 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.25015176308042714 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.2886751345948129); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
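[Editor's note] The section shows two different decision paths: the 18:38:02,096 run was skipped with a weighted imbalance of 0.40878413881917497, while the 18:38:02,100 run was balanced at only 0.5003035261608543 because the cluster had an idle server. A minimal sketch of that decision, using illustrative helper names (hasIdleServer, weightedImbalance) that are not HBase API:

    public class NeedsBalanceSketch {
      static boolean needsBalance(boolean hasIdleServer, double weightedImbalance, double minCostNeedBalance) {
        if (hasIdleServer) {
          return true;  // "Running balancer because cluster has idle server(s)."
        }
        // otherwise: "skipping load balancing because weighted average imbalance=... <= threshold(...)"
        return weightedImbalance > minCostNeedBalance;
      }

      public static void main(String[] args) {
        System.out.println(needsBalance(false, 0.40878413881917497, 1.0)); // false -> skipped (18:38:02,096)
        System.out.println(needsBalance(true,  0.5003035261608543,  1.0)); // true  -> balanced (18:38:02,100)
      }
    }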
2024-11-13T18:38:02,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv74553895=2, srv1337522242=0, srv29040290=1} racks are {rack=0} 2024-11-13T18:38:02,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:02,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:02,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:02,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:02,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:02,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:02,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=2, number of hosts=3, number of racks=1 2024-11-13T18:38:02,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.33333333333333337); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.2888503755054882, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.33333333333333337); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=9600 2024-11-13T18:38:02,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 43 ms to try 9600 different iterations. Found a solution that moves 1 regions; Going from a computed imbalance of 0.2888503755054882 to a new imbalance of 0.0030329289428076256. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.25); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-13T18:38:02,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv722321074=2, srv1128325986=0, srv1168273928=1} racks are {rack=0} 2024-11-13T18:38:02,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:02,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:02,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:02,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:02,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:02,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:02,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=3, number of hosts=3, number of racks=1 2024-11-13T18:38:02,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=7200 2024-11-13T18:38:02,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 19 ms to try 7200 different iterations. 
Found a solution that moves 2 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.008087810514153667. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.6666666666666666); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-13T18:38:02,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv981661996=2, srv1911813368=0, srv610297648=1} racks are {rack=0} 2024-11-13T18:38:02,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:02,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:02,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:02,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:02,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:02,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:02,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=4, number of hosts=3, number of racks=1 2024-11-13T18:38:02,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=9600 2024-11-13T18:38:02,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 25 ms to try 9600 different iterations. Found a solution that moves 2 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.006065857885615251. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.5); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-13T18:38:02,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1320837578=1, srv1283669758=0, srv1980714746=2} racks are {rack=0} 2024-11-13T18:38:02,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:02,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:02,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:02,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:02,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:02,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:02,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=20, number of hosts=3, number of racks=1 2024-11-13T18:38:02,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.4871794871794873); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.42216593343109815, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.4871794871794873); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=96000 2024-11-13T18:38:02,481 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 223 ms to try 96000 different iterations. Found a solution that moves 13 regions; Going from a computed imbalance of 0.42216593343109815 to a new imbalance of 0.003942807625649913. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.325); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,482 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-13T18:38:02,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv177487667=0, srv249393103=2, srv346162884=3, srv1953100540=1} racks are {rack=0} 2024-11-13T18:38:02,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:02,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:02,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:02,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:02,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:02,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:02,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:02,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:02,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=3, number of hosts=4, number of racks=1 2024-11-13T18:38:02,482 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.29457175359290033); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,482 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.25526148491585815, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.29457175359290033); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=19200 2024-11-13T18:38:02,518 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 36 ms to try 19200 different iterations. Found a solution that moves 1 regions; Going from a computed imbalance of 0.25526148491585815 to a new imbalance of 0.0020219526285384167. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.16666666666666666); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,518 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
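[Editor's note] Each "Finished computing new moving plan" line reports trying exactly computedMaxSteps iterations (19200 in 36 ms just above), so in these small test clusters the step budget, not the maxRunningTime=30000 ms limit, ends the search. A hedged sketch of such a dual-bounded loop; this is only an illustration of the budget suggested by the log, not the balancer's actual search loop:

    public class StepBudgetSketch {
      public static void main(String[] args) {
        long computedMaxSteps = 19200L;   // from the run above
        long maxRunningTime = 30000L;     // ms, from the "Loaded config" line
        long start = System.currentTimeMillis();
        long step = 0;
        while (step < computedMaxSteps && (System.currentTimeMillis() - start) < maxRunningTime) {
          // ... propose a candidate region move/swap and keep it if the weighted cost improves ...
          step++;
        }
        System.out.println("tried " + step + " iterations"); // e.g. "to try 19200 different iterations"
      }
    }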
2024-11-13T18:38:02,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1291438471=0, srv1557635012=1, srv817196344=3, srv239322468=2} racks are {rack=0} 2024-11-13T18:38:02,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:02,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:02,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:02,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:02,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:02,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:02,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:02,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:02,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=4, number of hosts=4, number of racks=1 2024-11-13T18:38:02,519 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,519 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=12800 2024-11-13T18:38:02,536 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 17 ms to try 12800 different iterations. Found a solution that moves 3 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.009098786828422877. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.75); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,537 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-13T18:38:02,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv455160735=2, srv112818252=0, srv1365706241=1, srv941771362=3} racks are {rack=0} 2024-11-13T18:38:02,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:02,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:02,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:02,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:02,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:02,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:02,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:02,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:02,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=5, number of hosts=4, number of racks=1 2024-11-13T18:38:02,537 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,537 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=16000 2024-11-13T18:38:02,565 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 28 ms to try 16000 different iterations. Found a solution that moves 3 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.007279029462738302. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.6); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,565 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-13T18:38:02,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv91146854=3, srv1717979023=0, srv795752547=2, srv2061792725=1} racks are {rack=0} 2024-11-13T18:38:02,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:02,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:02,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:02,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:02,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:02,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:02,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:02,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:02,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=6, number of hosts=4, number of racks=1 2024-11-13T18:38:02,566 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5773502691896257); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,566 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.5003035261608542, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5773502691896257); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=38400 2024-11-13T18:38:02,621 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 55 ms to try 38400 different iterations. Found a solution that moves 6 regions; Going from a computed imbalance of 0.5003035261608542 to a new imbalance of 0.006065857885615251. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.5); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,621 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-13T18:38:02,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1864457886=1, srv1088410283=0, srv803905849=2, srv872572951=3} racks are {rack=0} 2024-11-13T18:38:02,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:02,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:02,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:02,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:02,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:02,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:02,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:02,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:02,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=6, number of hosts=4, number of racks=1 2024-11-13T18:38:02,622 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.7071067811865475); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,622 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.6127441778046339, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.7071067811865475); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=25600 2024-11-13T18:38:02,656 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 33 ms to try 25600 different iterations. Found a solution that moves 4 regions; Going from a computed imbalance of 0.6127441778046339 to a new imbalance of 0.006065857885615251. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.5); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,656 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-13T18:38:02,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2076189309=0, srv2124712002=1, srv556440332=2, srv808377465=3} racks are {rack=0} 2024-11-13T18:38:02,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:02,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:02,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:02,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:02,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:02,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:02,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:02,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:02,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=6, number of hosts=4, number of racks=1 2024-11-13T18:38:02,657 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.7907604410896715); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,657 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.6852343510309111, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.7907604410896715); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=22400 2024-11-13T18:38:02,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 35 ms to try 22400 different iterations. Found a solution that moves 4 regions; Going from a computed imbalance of 0.6852343510309111 to a new imbalance of 0.006932409012131715. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.5714285714285714); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,692 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-13T18:38:02,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv577053272=2, srv73569364=3, srv1809702464=0, srv542466879=1} racks are {rack=0} 2024-11-13T18:38:02,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:02,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:02,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:02,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:02,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:02,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:02,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:02,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:02,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=6, number of hosts=4, number of racks=1 2024-11-13T18:38:02,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=19200 2024-11-13T18:38:02,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 25 ms to try 19200 different iterations. Found a solution that moves 4 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.008087810514153667. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.6666666666666666); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-13T18:38:02,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1380793521=0, srv323702196=2, srv1657938214=1, srv697249866=3} racks are {rack=0} 2024-11-13T18:38:02,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:02,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:02,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:02,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:02,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:02,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:02,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:02,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:02,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=7, number of hosts=4, number of racks=1 2024-11-13T18:38:02,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:02,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0962834585018294 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.11111111111111113); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-13T18:38:02,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv667793504=2, srv1128909635=0, srv2129404459=1, srv727305225=3} racks are {rack=0} 2024-11-13T18:38:02,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:02,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:02,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:02,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:02,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:02,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:02,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:02,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:02,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=8, number of hosts=4, number of racks=1 2024-11-13T18:38:02,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:02,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.17331022530329285 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.19999999999999996); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
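The two "skipping load balancing" lines above show the minCostNeedBalance gate: when the weighted average imbalance is at or below the logged threshold (1.0 in this test), no plan is generated, and the message names the two knobs that affect the decision, hbase.master.balancer.stochastic.minCostNeedBalance and the per-cost-function multipliers. The sketch below only restates that gate; the comparison logic is an assumption inferred from the wording of the log message, not a copy of StochasticLoadBalancer.

// Sketch of the skip decision implied by the log lines above. Property name is
// taken from the log message; the <= comparison is an assumption based on its wording.
public class NeedBalanceGate {
    public static void main(String[] args) {
        double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance, as logged
        double weightedAverageImbalance = 0.0962834585018294; // from the first skip message above

        if (weightedAverageImbalance <= minCostNeedBalance) {
            System.out.println("skipping load balancing: imbalance "
                + weightedAverageImbalance + " <= threshold " + minCostNeedBalance);
        } else {
            System.out.println("running balancer");
        }
    }
}

To make the balancer act on a cluster in this state, lower hbase.master.balancer.stochastic.minCostNeedBalance or raise the multiplier of the cost function of interest, as the message itself suggests.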
2024-11-13T18:38:02,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2147109189=2, srv1662323170=1, srv1353221804=0, srv562338408=3} racks are {rack=0} 2024-11-13T18:38:02,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:02,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:02,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:02,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:02,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:02,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:02,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:02,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:02,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=7, number of hosts=4, number of racks=1 2024-11-13T18:38:02,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=22400 2024-11-13T18:38:02,754 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 34 ms to try 22400 different iterations. Found a solution that moves 5 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.008665511265164644. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.7142857142857143); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,754 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-13T18:38:02,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv485967307=4, srv1516785273=0, srv1651095798=1, srv1867572007=2, srv436036299=3} racks are {rack=0} 2024-11-13T18:38:02,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:02,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:02,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:02,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:02,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:02,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:02,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:02,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:02,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:02,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:02,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=4, number of hosts=5, number of racks=1 2024-11-13T18:38:02,755 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:02,755 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.22705408170595567 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.26202041028867284); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,771 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-13T18:38:02,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1415419477=2, srv1680697481=4, srv1528908034=3, srv1105740513=0, srv1308709051=1, srv852159443=5} racks are {rack=0} 2024-11-13T18:38:02,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:02,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:02,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:02,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:02,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:02,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:02,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:02,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:02,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:02,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:02,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:02,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:02,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1500, number of hosts=6, number of racks=1 2024-11-13T18:38:02,817 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.4440183710462697); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:02,817 WARN [Time-limited test {}] balancer.StochasticLoadBalancer(548): calculatedMaxSteps:14448000 for loadbalancer's stochastic walk is larger than maxSteps:1000000. Hence load balancing may not work well. 
Setting parameter "hbase.master.balancer.stochastic.runMaxSteps" to true can overcome this issue.(This config change does not require service restart) 2024-11-13T18:38:02,817 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.38476461962415054, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.4440183710462697); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=1000000 2024-11-13T18:38:25,569 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 22756 ms to try 1000000 different iterations. Found a solution that moves 1015 regions; Going from a computed imbalance of 0.38476461962415054 to a new imbalance of 0.00409092741122889. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.3372093023255814); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:25,591 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-13T18:38:25,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1894083750=2, srv1645901617=1, srv1913718564=3, srv1285024213=0, srv465033743=4, srv953473021=5} racks are {rack=0} 2024-11-13T18:38:25,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:25,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:25,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:25,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:25,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:25,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:25,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:25,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:25,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:25,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:25,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:25,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:25,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1500, number of hosts=6, number of racks=1 2024-11-13T18:38:25,648 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.3438084096789955); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:25,648 WARN [Time-limited test {}] balancer.StochasticLoadBalancer(548): calculatedMaxSteps:16800000 for loadbalancer's stochastic walk is larger than maxSteps:1000000. Hence load balancing may not work well. 
Setting parameter "hbase.master.balancer.stochastic.runMaxSteps" to true can overcome this issue.(This config change does not require service restart) 2024-11-13T18:38:25,648 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.2979275647131677, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.3438084096789955); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=1000000 2024-11-13T18:38:52,587 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 26945 ms to try 1000000 different iterations. Found a solution that moves 922 regions; Going from a computed imbalance of 0.2979275647131677 to a new imbalance of 0.003195840554592721. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.2634285714285714); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:52,594 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-13T18:38:52,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv66553813=11, srv1496309414=2, srv332975955=8, srv331976388=7, srv1699850564=4, srv266525657=6, srv903718350=14, srv359812370=9, srv686328197=12, srv19418791=5, srv1442301966=1, srv1125860287=0, srv835719977=13, srv15462280=3, srv48070694=10} racks are {rack=0} 2024-11-13T18:38:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:52,596 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=15, number of hosts=15, number of racks=1 2024-11-13T18:38:52,597 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:52,597 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.12507588154021357 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.14433756729740646); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:52,598 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-13T18:38:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv688452381=6, srv885333569=9, srv1193829319=0, srv259067772=3, srv77140995=7, srv2059770859=1, srv820224857=8, srv2114133101=2, srv646937343=4, srv656083646=5} racks are {rack=0} 2024-11-13T18:38:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:52,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:52,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:52,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:52,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:52,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 
0 2024-11-13T18:38:52,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:52,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:52,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:52,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:52,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:52,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=10, number of hosts=10, number of racks=1 2024-11-13T18:38:52,599 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:52,599 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=80000 2024-11-13T18:38:52,733 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 134 ms to try 80000 different iterations. Found a solution that moves 9 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.010918544194107453. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.9); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:52,734 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-13T18:38:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1392768100=1, srv32030575=7, srv142775068=2, srv587912164=8, srv610794482=9, srv306184429=6, srv1281565368=0, srv1984419368=5, srv197390133=4, srv1676075724=3} racks are {rack=0} 2024-11-13T18:38:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:52,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:52,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:52,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:52,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:52,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:52,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:52,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:52,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:52,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:52,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:52,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=6, number of hosts=10, number of racks=1 2024-11-13T18:38:52,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:52,735 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.055531997651093117 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.06408392528936147); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:52,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-13T18:38:52,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1028787341=0, srv170334971=3, srv7534028=8, srv2000085633=5, srv650207031=7, srv1451371380=1, srv1701694290=2, srv875581665=9, srv436199585=6, srv1763990842=4} racks are {rack=0} 2024-11-13T18:38:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:52,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:52,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:52,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:52,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:52,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:52,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:52,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:52,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:52,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:52,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=54, number of hosts=10, number of racks=1 2024-11-13T18:38:52,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.9999999999999999); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:52,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164644, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.9999999999999999); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=432000 2024-11-13T18:38:53,738 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 1002 ms to try 432000 different iterations. Found a solution that moves 48 regions; Going from a computed imbalance of 0.8665511265164644 to a new imbalance of 0.01078374735220489. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.8888888888888888); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:53,739 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-13T18:38:53,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1686175994=3, srv618564114=9, srv287951135=8, srv1309186637=1, srv1083053259=0, srv1740444906=4, srv1866329152=5, srv1381151009=2, srv203842750=7, srv1890788891=6} racks are {rack=0} 2024-11-13T18:38:53,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:53,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:53,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:53,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:53,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:53,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:53,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:53,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:53,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:53,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:53,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=55, number of hosts=10, number of racks=1 2024-11-13T18:38:53,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:53,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=440000 2024-11-13T18:38:54,738 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 999 ms to try 440000 different iterations. Found a solution that moves 49 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.010808255868914448. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.8909090909090909); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:54,739 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-13T18:38:54,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv784045947=9, srv1570490861=4, srv2095113403=7, srv1098162503=0, srv1139488042=1, srv2049478710=6, srv1326197156=2, srv776375445=8, srv1431846376=3, srv2021196523=5} racks are {rack=0} 2024-11-13T18:38:54,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:54,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:54,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:54,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:54,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:54,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:54,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:54,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:54,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:54,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:54,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:54,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:54,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:54,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:54,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:54,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:54,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:54,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:54,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:54,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:54,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=56, number of hosts=10, number of racks=1 2024-11-13T18:38:54,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:54,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=448000 2024-11-13T18:38:55,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 1213 ms to try 448000 different iterations. Found a solution that moves 50 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.010831889081455806. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.8928571428571429); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:55,954 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-13T18:38:55,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1062483420=1, srv398740566=6, srv1310620322=2, srv1380722623=3, srv1007424626=0, srv823106579=7, srv869171377=8, srv1640265219=5, srv95844247=9, srv1490273643=4} racks are {rack=0} 2024-11-13T18:38:55,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:55,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:55,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:55,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:55,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:55,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:55,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:55,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:55,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:55,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:55,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:55,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:55,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:55,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:55,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:55,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:55,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:55,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:55,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:55,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:55,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=16, number of hosts=10, number of racks=1 2024-11-13T18:38:55,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:55,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=128000 2024-11-13T18:38:56,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 270 ms to try 128000 different iterations. Found a solution that moves 14 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.01061525129982669. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.875); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:56,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
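Two regularities hold across the five planning runs above and make the computedMaxSteps figures easy to sanity-check: the MoveCostFunction imbalance reported after each plan equals moved regions divided by total regions, and computedMaxSteps equals total regions x 800 x servers, 800 being the default value of hbase.master.balancer.stochastic.stepsPerRegion. Both relationships are inferred from the numbers in this log rather than quoted from the balancer code; a quick check under those assumptions:

public class MaxStepsCheck {
  public static void main(String[] args) {
    int servers = 10;          // every run above uses 10 servers
    int stepsPerRegion = 800;  // assumed default of hbase.master.balancer.stochastic.stepsPerRegion
    int[] moved = {9, 48, 49, 50, 14}; // "moves N regions" from the plan records above
    double[] moveImbalance = {0.9, 0.8888888888888888, 0.8909090909090909, 0.8928571428571429, 0.875};
    for (int i = 0; i < moved.length; i++) {
      long regions = Math.round(moved[i] / moveImbalance[i]); // 10, 54, 55, 56, 16
      System.out.println(regions + " regions -> " + regions * stepsPerRegion * servers);
      // prints 80000, 432000, 440000, 448000, 128000, matching the computedMaxSteps values above
    }
  }
}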
2024-11-13T18:38:56,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv950575759=9, srv1880008460=6, srv1412539188=4, srv1381353060=3, srv115278718=2, srv1104662365=0, srv1801529420=5, srv207568966=7, srv735145799=8, srv1150568230=1} racks are {rack=0} 2024-11-13T18:38:56,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:56,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:56,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:56,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:56,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:56,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:56,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:56,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:56,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:56,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:56,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:56,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:56,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:56,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:56,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:56,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:56,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:56,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:56,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:56,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:56,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=8, number of hosts=10, number of racks=1 2024-11-13T18:38:56,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:56,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.30649131741006164 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.35369098029121115); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:56,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-13T18:38:56,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1601360123=0, srv2021977756=4, srv1710764518=1, srv2050221478=5, srv922001773=7, srv1947368941=3, srv1855637297=2, srv404748459=6, srv941027438=9, srv923134941=8} racks are {rack=0} 2024-11-13T18:38:56,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:56,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:56,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:56,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:56,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:56,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:56,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:56,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:56,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:56,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:56,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:56,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:56,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:56,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:56,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:56,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:56,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:56,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:56,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:56,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:56,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=9, number of hosts=10, number of racks=1 2024-11-13T18:38:56,226 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-13T18:38:56,226 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.34662045060658575 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.39999999999999997); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:56,226 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-13T18:38:56,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1775812458=3, srv1777774726=4, srv715993047=8, srv804370551=9, srv1206044822=2, srv680815659=7, srv329438093=5, srv462516530=6, srv1015414801=0, srv1143488947=1} racks are {rack=0} 2024-11-13T18:38:56,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:56,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:56,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:56,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:56,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:56,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:56,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:56,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:56,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:56,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:56,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:56,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:56,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:56,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:56,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:56,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:56,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:56,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:56,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 
2024-11-13T18:38:56,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:56,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=10, number of hosts=10, number of racks=1 2024-11-13T18:38:56,226 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:56,226 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.3851338340073176 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.44444444444444453); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:56,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-13T18:38:56,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv316396937=6, srv605773703=7, srv1090799267=0, srv1464925460=1, srv1544950768=3, srv1751575580=5, srv1488362647=2, srv1692387954=4, srv767646325=8, srv921515907=9} racks are {rack=0} 2024-11-13T18:38:56,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:56,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:56,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:56,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:56,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:56,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:56,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:56,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:56,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:56,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:56,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:56,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:56,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:56,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:56,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:56,229 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:56,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:56,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:56,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:56,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:56,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=123, number of hosts=10, number of racks=1 2024-11-13T18:38:56,230 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:56,230 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.8002334382626535 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.923469387755102); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:56,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
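Several clusters above are left alone because their weighted average imbalance stays under the 1.0 threshold in effect for this run, which is controlled by the hbase.master.balancer.stochastic.minCostNeedBalance property named in the log message. Below is a minimal, hypothetical sketch of lowering that threshold through the configuration API, per the log's own suggestion; the 0.05 value is illustrative only, and on a real cluster the property would normally be set in the master's hbase-site.xml.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MinCostNeedBalanceSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // The threshold in effect in this run is 1.0 (see "threshold(1.0)" above); lowering it makes
    // the balancer act on smaller weighted average imbalances such as the 0.3851... reported above.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    System.out.println(conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
  }
}

The other lever the message mentions, raising the multiplier of a specific cost function, is configured the same way through the corresponding per-cost-function multiplier properties.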
2024-11-13T18:38:56,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1357047284=1, srv221589632=4, srv620123373=5, srv965661191=9, srv1034707909=0, srv876343399=7, srv891779794=8, srv1915992765=3, srv1588021543=2, srv761559478=6} racks are {rack=0} 2024-11-13T18:38:56,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:56,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:56,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:56,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:56,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:56,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:56,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:56,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:56,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:56,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:56,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:56,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:56,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:56,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:56,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:56,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:56,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:56,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:56,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:56,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:56,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=155, number of hosts=10, number of racks=1 2024-11-13T18:38:56,234 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:56,234 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.8131812243798632 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.9384111329343621); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:56,234 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-13T18:38:56,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1123631884=0, srv1639219154=1, srv2063762145=3, srv826650372=6, srv1808087860=2, srv844385198=7, srv75354449=5, srv49247895=4} racks are {rack=0} 2024-11-13T18:38:56,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:56,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:56,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:56,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:56,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:56,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:56,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:56,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:56,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:56,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:56,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:56,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:56,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:56,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:56,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:56,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:56,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=14, number of hosts=8, number of racks=1 2024-11-13T18:38:56,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:56,236 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.05755254949858986 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0664156421213727); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:56,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-13T18:38:56,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1807948736=1, srv1663473919=0, srv2010623302=3, srv463851600=5, srv700117473=6, srv1928163166=2, srv876738617=7, srv377038362=4} racks are {rack=0} 2024-11-13T18:38:56,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:56,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:56,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:56,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:56,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:56,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:56,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:56,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:56,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:56,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:56,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:56,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:56,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:56,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:56,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:56,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:56,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=14, number of hosts=8, number of racks=1 2024-11-13T18:38:56,237 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:56,237 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.06673965003400768 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.07701755613924488); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:56,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-13T18:38:56,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1746856808=4, srv986686921=7, srv1379935863=1, srv1701376001=3, srv1672482594=2, srv1072477801=0, srv2015714854=5, srv239343713=6} racks are {rack=0} 2024-11-13T18:38:56,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:56,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:56,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:56,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:56,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:56,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:56,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:56,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:56,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:56,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:56,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:56,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:56,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:56,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:56,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:56,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:56,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=130, number of hosts=8, number of racks=1 2024-11-13T18:38:56,245 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:56,245 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.28093705674099306 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.32420136347910594); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:56,247 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-13T18:38:56,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586114067=2, srv719417206=6, srv409161463=5, srv2145553411=3, srv345979666=4, srv1140672829=0, srv1572032245=1, srv79402429=7} racks are {rack=0} 2024-11-13T18:38:56,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:56,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:56,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:56,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:56,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:56,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:56,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:56,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:56,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:56,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:56,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:56,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:56,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:56,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:56,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:56,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:56,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=140, number of hosts=8, number of racks=1 2024-11-13T18:38:56,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-13T18:38:56,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.07533492111851356 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.08693649897076465); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:56,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-13T18:38:56,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1872963664=1, srv1299095303=0, srv870185988=3, srv1988112046=2, srv877924935=4} racks are {rack=0} 2024-11-13T18:38:56,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:56,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:56,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:56,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:56,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:56,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:56,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:56,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:56,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:56,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:56,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=5, number of hosts=5, number of racks=1 2024-11-13T18:38:56,260 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.25); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:56,260 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.21663778162911612, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.25); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=80000 2024-11-13T18:38:56,418 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 158 ms to try 80000 different iterations. Found a solution that moves 4 regions; Going from a computed imbalance of 0.21663778162911612 to a new imbalance of 0.0024263431542461008. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.2); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:56,432 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
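The entries above show how the StochasticLoadBalancer decides whether to act: each cost function reports an imbalance in [0, 1], the balancer compares a multiplier-weighted average of those imbalances against hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run, per the "skipping load balancing" message), and when another trigger fires (here, idle region servers) it searches computedMaxSteps candidate actions (80000 here) and keeps the plan with the lowest resulting cost, moving 4 regions and dropping the weighted imbalance from 0.2166... to 0.0024.... The following minimal Java sketch reproduces the weighting arithmetic behind the logged figures; the class and method names are illustrative assumptions, not HBase's actual implementation.

// Sketch only: reproduces the "weighted average imbalance" values logged above
// from the per-cost-function multipliers and imbalances. Cost functions marked
// "(not needed)" are excluded. Names are hypothetical, not HBase APIs.
public final class WeightedImbalanceSketch {

  static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
    double weightedSum = 0.0;
    double multiplierSum = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weightedSum += multipliers[i] * imbalances[i];
      multiplierSum += multipliers[i];
    }
    return multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;
  }

  public static void main(String[] args) {
    // Multipliers of the "needed" cost functions in the entries above:
    // RegionCountSkew=500, Move=7, RackLocality=15, TableSkew=35,
    // ReadRequest=5, WriteRequest=5, MemStoreSize=5, StoreFile=5 (sum = 577).
    double[] multipliers = {500, 7, 15, 35, 5, 5, 5, 5};
    // For the 5-server cluster above only RegionCountSkewCostFunction is
    // imbalanced (0.25); all other imbalances are 0.0.
    double[] imbalances = {0.25, 0, 0, 0, 0, 0, 0, 0};

    // Prints roughly 0.21663778162911612, matching the "initial weighted
    // average imbalance" logged before the balancer starts searching.
    System.out.println(weightedAverageImbalance(multipliers, imbalances));

    // Likewise, a RegionCountSkew imbalance of 0.08693649897076465 yields
    // roughly 0.07533492111851356, which stays below the 1.0 threshold,
    // so that cluster's balancing run is skipped.
    imbalances[0] = 0.08693649897076465;
    System.out.println(weightedAverageImbalance(multipliers, imbalances));
  }
}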
2024-11-13T18:38:56,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv645033216=314, srv500438298=282, srv1913995136=176, srv815783448=348, srv1290451027=61, srv2001654295=194, srv2135919052=228, srv860932928=361, srv1785990353=153, srv438583896=270, srv827057886=351, srv1656428885=135, srv999294923=392, srv1474666254=95, srv38415895=261, srv329093233=254, srv82969782=353, srv954849341=381, srv1216745565=49, srv1910376785=175, srv670635718=316, srv321718337=252, srv620244561=310, srv567123280=298, srv7677587=340, srv480425463=277, srv1002847543=1, srv1732120883=147, srv315890254=248, srv838611717=356, srv2099911427=222, srv405257646=267, srv45591293=274, srv1026422038=7, srv19956603=192, srv1928379716=178, srv246744212=234, srv642155006=313, srv888249681=367, srv318095715=250, srv1737037150=149, srv1376077393=75, srv730782030=330, srv918730578=371, srv1602162961=127, srv108562909=22, srv262925585=238, srv81583135=349, srv9778446=387, srv739653290=332, srv1852856098=164, srv690829591=323, srv1476689882=97, srv1934343224=180, srv1030106679=9, srv844765054=357, srv1553357805=117, srv707777372=327, srv2097901322=221, srv754644270=336, srv897893012=368, srv204874889=204, srv1349328795=70, srv1238418706=51, srv628205750=311, srv934763184=376, srv1130160432=28, srv77749194=342, srv929426954=375, srv1046429187=13, srv471437973=276, srv2090009615=219, srv993925996=390, srv351395626=256, srv1156624241=33, srv602300061=307, srv1543896685=112, srv585892770=302, srv466240144=275, srv944583098=378, srv2050922288=205, srv1202808229=47, srv981400047=388, srv1162906523=35, srv328752383=253, srv226202782=230, srv2040003873=201, srv1059075168=16, srv1963451163=185, srv1032677151=11, srv594214018=306, srv1242195825=55, srv1379766247=76, srv1983357861=190, srv50191100=284, srv1403677744=82, srv130082141=63, srv1750289926=152, srv254139598=236, srv1373845583=74, srv2002882335=195, srv1140997149=31, srv1931120309=179, srv836673394=355, srv1900842970=171, srv1197622695=45, srv707387586=326, srv1612988001=129, srv811576234=347, srv1415615612=84, srv2045818740=202, srv397316277=265, srv764657038=338, srv1023787428=4, srv1554652431=118, srv404100865=266, srv587324505=303, srv1057881043=15, srv1563514766=120, srv1650380641=134, srv524747853=290, srv191011130=174, srv636927069=312, srv1027777339=8, srv198328995=189, srv832658982=354, srv1796316614=154, srv160266892=128, srv1257357783=58, srv1507817345=103, srv1516907776=107, srv972711475=385, srv1422664803=85, srv716846940=328, srv189674878=169, srv1385791450=78, srv1575054382=122, srv543485728=292, srv1161318273=34, srv1689076613=141, srv1828094553=159, srv2074876460=213, srv1372878768=73, srv673476023=317, srv755830700=337, srv2124470114=225, srv1188600154=43, srv98899322=389, srv1744586245=151, srv1807136314=155, srv1390463287=80, srv1474404491=94, srv794433536=345, srv747642932=334, srv1715863557=144, srv2081283470=216, srv2088705858=218, srv1381037243=77, srv768165025=341, srv2069602777=210, srv676279252=319, srv1526671205=110, srv2096385305=220, srv225193843=229, srv482974682=278, srv2000851995=193, srv558245184=295, srv1589040695=125, srv229470412=231, srv520819660=288, srv690829770=324, srv1485124622=99, srv2048572972=203, srv1897745635=170, srv243766068=233, srv796734455=346, srv1982522823=188, srv2074204467=212, srv1826561560=158, srv909023695=370, srv2130173912=226, srv1460534473=92, srv1879488598=166, srv1387847072=79, srv1618359586=131, srv1556643893=119, srv291053943=241, srv1185284187=42, srv1453408585=90, 
srv1317797510=66, srv439129096=271, srv193947827=181, srv1837765389=161, srv971301276=384, srv2032516128=200, srv1521974367=108, srv576137393=300, srv49020609=281, srv778727536=343, srv1438094669=86, srv307894683=247, srv1851948098=163, srv2020803982=197, srv2085170951=217, srv1683597826=139, srv1073404504=20, srv484303654=279, srv1942785873=182, srv128433729=60, srv580401456=301, srv391180778=263, srv863831031=364, srv1241615201=54, srv1063202704=18, srv726166482=329, srv1025586486=5, srv31977918=251, srv1530814079=111, srv198826724=191, srv533293312=291, srv1445378558=88, srv1474722600=96, srv5665991=297, srv1464275483=93, srv2069353851=209, srv394047806=264, srv303477596=245, srv1188860144=44, srv87773941=365, srv1623452727=133, srv92835666=374, srv1261344008=59, srv1486038309=100, srv1742534353=150, srv591233317=305, srv1511741981=105, srv295139619=243, srv413420793=268, srv452498971=272, srv500478046=283, srv316493809=249, srv878450023=366, srv78765446=344, srv2077250618=214, srv86362323=363, srv165748535=136, srv1457504086=91, srv2028598423=199, srv1893899879=168, srv1345194404=69, srv382486686=260, srv977610472=386, srv1392261191=81, srv1971886724=187, srv1026028453=6, srv1551550164=115, srv923287381=372, srv1720839200=145, srv1328816696=68, srv859689264=360, srv113731248=29, srv1166350173=37, srv731458151=331, srv927705211=373, srv587710248=304, srv845255889=358, srv258418715=237, srv675486357=318, srv1525560600=109, srv1247817756=56, srv2056625568=207, srv454094972=273, srv1705381406=143, srv1960006277=184, srv952950661=380, srv943212107=377, srv1239125080=52, srv1184910593=41, srv1548405069=114, srv1112672708=24, srv1918536723=177, srv1240684740=53, srv823846819=350, srv1138566147=30, srv1292433205=62, srv1175409830=38, srv523799334=289, srv1000894946=0, srv1129958607=27, srv297593154=244, srv391040081=262, srv1304042509=64, srv1448307234=89, srv1227565239=50, srv2131934482=227, srv1868685550=165, srv859087928=359, srv362119341=257, srv1125520640=25, srv508088770=286, srv2028116987=198, srv751166237=335, srv655934651=315, srv1619795525=132, srv998804410=391, srv2080036225=215, srv1356474165=71, srv37715586=259, srv1031187035=10, srv1946251993=183, srv1902052585=172, srv685865011=322, srv1052138521=14, srv1510228747=104, srv1846445451=162, srv1489786172=101, srv158093439=123, srv557069872=293, srv1021213497=3, srv156421661=121, srv2069787139=211, srv1737023294=148, srv1007940742=2, srv516838975=287, srv568760039=299, srv1724681537=146, srv1178653031=39, srv1500736938=102, srv680485731=320, srv331383650=255, srv696941928=325, srv951380390=379, srv1313066016=65, srv566412189=296, srv905462953=369, srv29481955=242, srv968539265=383, srv1552873750=116, srv828416494=352, srv1584236673=124, srv1668193881=137, srv1890274875=167, srv1809297241=156, srv1165298857=36, srv1042701353=12, srv424067095=269, srv304481561=246, srv48754055=280, srv602924881=308, srv265016871=239, srv276841502=240, srv1080815198=21, srv161466018=130, srv1819336642=157, srv2052243702=206, srv366239882=258, srv681691023=321, srv1198288451=46, srv1546356392=113, srv1702673362=142, srv1414887684=83, srv765444528=339, srv1833553341=160, srv60641894=309, srv2122888723=224, srv2103216563=223, srv1205965570=48, srv1669464912=138, srv1686874485=140, srv1484722545=98, srv863308315=362, srv151389748=106, srv1143092459=32, srv2064093475=208, srv747633484=333, srv253004282=235, srv239475930=232, srv106852306=19, srv1092779576=23, srv1061816951=17, srv1183540575=40, srv1326956822=67, srv1443180136=87, srv159046209=126, 
srv1129530882=26, srv507334113=285, srv1905504373=173, srv2008876640=196, srv1361956237=72, srv1966583788=186, srv557790407=294, srv955868330=382, srv1254596727=57} racks are {rack=0} 2024-11-13T18:38:56,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-13T18:38:56,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-13T18:38:56,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-13T18:38:56,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-13T18:38:56,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-13T18:38:56,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-13T18:38:56,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-13T18:38:56,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-13T18:38:56,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-13T18:38:56,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-13T18:38:56,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-13T18:38:56,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-13T18:38:56,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-13T18:38:56,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-13T18:38:56,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 
2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-13T18:38:56,439 DEBUG [Time-limited test 
{}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 
is on host 92 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-13T18:38:56,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is 
on host 123 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 154 is on host 154 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-13T18:38:56,440 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-13T18:38:56,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 
2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is 
on host 277 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 308 is on host 308 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-13T18:38:56,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-13T18:38:56,442 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-13T18:38:56,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-13T18:38:56,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-13T18:38:56,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-13T18:38:56,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-13T18:38:56,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 
2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 
2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-13T18:38:56,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 
2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-13T18:38:56,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 
2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-13T18:38:56,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-13T18:38:56,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-13T18:38:56,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-13T18:38:56,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-13T18:38:56,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-13T18:38:56,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-13T18:38:56,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-13T18:38:56,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-13T18:38:56,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 
2024-11-13T18:38:56,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-13T18:38:56,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-13T18:38:56,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-13T18:38:56,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-13T18:38:56,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-13T18:38:56,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=56, number of hosts=393, number of racks=1 2024-11-13T18:38:56,449 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.9999999999999929); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:38:56,449 WARN [Time-limited test {}] balancer.StochasticLoadBalancer(548): calculatedMaxSteps:17606400 for loadbalancer's stochastic walk is larger than maxSteps:1000000. Hence load balancing may not work well. Setting parameter "hbase.master.balancer.stochastic.runMaxSteps" to true can overcome this issue.(This config change does not require service restart) 2024-11-13T18:38:56,449 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164584, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.9999999999999929); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=1000000 2024-11-13T18:39:05,458 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 9011 ms to try 1000000 different iterations. Found a solution that moves 55 regions; Going from a computed imbalance of 0.8665511265164584 to a new imbalance of 0.011915077989601387. 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.9821428571428571); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T18:39:05,492 INFO [Time-limited test {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-13T18:39:05,493 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 577.0 etc. 2024-11-13T18:39:05,503 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: master.balancer.TestBalancerDecision#testBalancerDecisions Thread=12 (was 12), OpenFileDescriptor=286 (was 286), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1004 (was 1218), ProcessCount=11 (was 11), AvailableMemoryMB=1851 (was 711) - AvailableMemoryMB LEAK? -
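Editor's note on the WARN above: the calculatedMaxSteps of 17,606,400 is consistent with the reported stepsPerRegion of 800 applied to 22,008 regions (56 tables across 393 servers), which exceeds the 1,000,000-step cap and is why the balancer warns that its stochastic walk may stop short. The log suggests setting "hbase.master.balancer.stochastic.runMaxSteps" to true. Below is a minimal, hedged sketch of that override using the standard HBase client configuration API; the property name is quoted verbatim from the WARN, while the class name BalancerStepsConfig is illustrative only and assumes hbase-client/hbase-common on the classpath. In a real deployment the property would normally be set in hbase-site.xml on the master (the log notes the change does not require a service restart), at the cost of longer balancer computation time.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerStepsConfig {
  public static void main(String[] args) {
    // Start from the usual HBase configuration (hbase-site.xml on the classpath, if present).
    Configuration conf = HBaseConfiguration.create();
    // Property name taken from the WARN above: allow the stochastic walk to run the full
    // calculatedMaxSteps instead of stopping at the 1,000,000-step maxSteps cap.
    conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", true);
    // Echo the effective value to confirm the override took effect.
    System.out.println("runMaxSteps = "
        + conf.getBoolean("hbase.master.balancer.stochastic.runMaxSteps", false));
  }
}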